This document walks through using web technologies — WebSockets, WebGL, and device APIs — to build immersive interfaces. It shows how to sync device orientation across clients in real time over WebSockets, and how to render 3D scenes and objects with WebGL and three.js driven by that orientation data. It also demonstrates getUserMedia for accessing the camera stream and headtrackr.js for tracking head pose to control the 3D camera view.
// Listing 21: renderer and scene setup
// dimensions of the rendering viewport
var WIDTH = 724,
    HEIGHT = 512;
// look up the DOM node that will host the WebGL canvas
var container = $('container');
// build the WebGL renderer, size it, clear it to near-black,
// and attach its canvas to the page
var renderer = new THREE.WebGLRenderer();
renderer.setSize(WIDTH, HEIGHT);
renderer.setClearColorHex(0x111111, 1);
renderer.clear();
container.appendChild(renderer.domElement);
// the scene graph that everything else gets added to
var scene = new THREE.Scene();
// Listing 22: camera and lights
// frustum settings: field of view, aspect ratio, near and far clip planes
var FOV = 45, ASPECT = WIDTH / HEIGHT, NEAR = 0.1, FAR = 10000;
// the camera starts at (0,0,0), so pull it back along z to see the scene
var camera = new THREE.PerspectiveCamera( FOV, ASPECT, NEAR, FAR);
camera.position.z = 100;
// add the camera to the scene
scene.add(camera);
// a spotlight positioned above/behind the scene for direct illumination
var spotlight = new THREE.SpotLight();
spotlight.position.set( 170, 330, -160 );
scene.add(spotlight);
// BUG FIX: 'ambilight' was assigned without 'var', creating an implicit
// global (and a ReferenceError in strict mode) — declare it properly
var ambilight = new THREE.AmbientLight(0x333333);
scene.add(ambilight);
// enable shadow mapping on the renderer so the spotlight can cast shadows
renderer.shadowMapEnabled = true;
// Listing 23: load a teapot model and start the render loop
var teapot;                          // assigned once the model has loaded
var loader = new THREE.JSONLoader();
var createScene = function createScene( geometry ) {
  // MeshFaceMaterial picks up the material list baked into the geometry
  var material = new THREE.MeshFaceMaterial();
  teapot = new THREE.Mesh( geometry, material );
  teapot.scale.set(8, 8, 8);
  teapot.position.set( 0, -10, 0 );
  scene.add( teapot );
  console.log('matrix ' + teapot.matrix);
  console.log('rotation ' + teapot.rotation.x);
};
loader.load('teapot-model.js', createScene );
// render one frame immediately, then hand off to the animation loop
renderer.render(scene, camera);
animate();
// re-render on every frame via requestAnimationFrame
function animate() {
  requestAnimationFrame(animate);
  renderer.render(scene, camera);
}
<!-- Listing 31: hidden tracking canvas + webcam video feeding headtrackr -->
<canvas id="inputCanvas" width="320" height="240"
        style="display:none"></canvas>
<video id="inputVideo" autoplay loop></video>
<script>
  // wire the camera stream and the analysis canvas into the head tracker
  var videoInput = document.getElementById('inputVideo');
  var canvasInput = document.getElementById('inputCanvas');
  var htracker = new headtrackr.Tracker();
  htracker.init(videoInput, canvasInput);
  htracker.start();
</script>
// Listing 32: couple the three.js camera to the tracked head position.
// Screen size in model units is 27, the virtual screen sits at [0,0,50],
// the camera keeps pointing at the origin, and damping smooths the motion.
headtrackr.controllers.three.realisticAbsoluteCameraControl(
  camera,
  27,
  [0, 0, 50],
  new THREE.Vector3(0, 0, 0),
  { damping: 0.5 }
);
Parameters of realisticAbsoluteCameraControl:
* @param {THREE.PerspectiveCamera} camera — the camera to control
* @param {number} scaling — size of the screen in 3D-model units relative
  to the vertical size of the computer screen in real life
* @param {array} fixedPosition — (x, y, z) position of the real-life
  screen in 3D-model space coordinates
* @param {THREE.Vector3} lookAt — the object/position the camera should
  be pointed towards
* @param {object} params — optional object with optional parameters
// Listing 33: scale scene fog with the viewer's distance from the screen.
// IMPROVED: headtracking events fire many times per second, so reuse a
// single Fog instance and mutate its near/far planes instead of allocating
// a new THREE.Fog on every event (same rendered result, less GC churn).
document.addEventListener('headtrackingEvent', function(event) {
  if (!scene.fog) {
    scene.fog = new THREE.Fog(0x000000, 1, 3000);
  }
  // event.z is the head's distance from the webcam in cm; the factor 27
  // maps real-world cm into model units (matches the camera controller)
  scene.fog.near = 1 + (event.z * 27);
  scene.fog.far = 3000 + (event.z * 27);
}, false);
* x : position of the head, in cm, to the right of the camera as seen
  from the user's point of view (see figure)
* y : position of the head, in cm, above the camera (see figure)
* z : distance of the head from the camera, in cm (see figure)
// Listing 37: draw a depth line at the tracked head position.
// x, y, z are the head coordinates delivered by the headtracking event.
var geometry = new THREE.Geometry();
// one endpoint far behind the scene, the other at the head's z-distance
geometry.vertices.push(new THREE.Vertex(new THREE.Vector3(0, 0, -80000)));
geometry.vertices.push(new THREE.Vertex(new THREE.Vector3(0, 0, z)));
var line = new THREE.Line(
  geometry,
  new THREE.LineBasicMaterial({ color: 0xeeeeee })
);
line.position.x = x;
line.position.y = y;
scene.add(line);