<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Live Object Detection</title>
<!-- Allow use of native app.* calls -->
<script src='file:///android_asset/app.js'></script>
<!-- Load TensorFlow.js and the coco-ssd model -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/coco-ssd"></script>
</head>
<style>
body { color: white; margin:0; text-align:center; }
.frame { text-align:center; }
.layers {
position: absolute; left:0; right:0; margin-left:auto; margin-right:auto;
margin-top: 0; top: 50%; transform: translateY(-50%);
}
</style>
<script>
//Init globals (intentionally `var` so they are shared across all functions below).
var tlast = Date.now()  // timestamp of the previous frame; used by Detect() for the fps readout
var model = null        // COCO-SSD model instance; assigned in OnLoad() once loading completes
//Lock screen orientation to Portrait (native DroidScript API exposed by app.js).
app.SetOrientation( "Portrait" );
//Called after web page is loaded.
//Called after the web page has loaded (wired up via <body onload>).
//Loads the COCO-SSD model, stores it in the global `model`, then starts
//the camera preview.
function OnLoad() {
  console.log( "loading model..." )
  cocoSsd.load().then( (loadedModel)=> {
    console.log( "loaded ("+tf.getBackend()+")" )
    //Store model and start camera preview.
    model = loadedModel
    enableCam()
  }).catch( (err)=> {
    //Report load failures (e.g. no network) instead of swallowing them.
    console.log( "model load failed: " + err )
  })
}
// Enable the live webcam view and start classification.
function enableCam(event) {
// Only continue if the COCO-SSD has finished loading.
if (!model) return;
// getUsermedia parameters to force back camera video but not audio.
const constraints = {
audio: false,
video:{ facingMode:'environment', width:{min:800}, height:{min:800} }
}
// Activate the webcam stream.
navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
video.srcObject = stream;
video.addEventListener('loadeddata', Detect);
});
}
//Detect objects in canvas.
function Detect() {
//console.log( "detecting" )
if( !canvas._done ) {
//Get canvas context
var c = document.getElementById("canvas");
ctx = c.getContext("2d");
//console.log( "dif: " + (video.videoWidth-video.offsetWidth)/2 )
//canvas.style.marginLeft = (video.videoWidth-video.offsetWidth)/2
canvas.width = video.offsetWidth, canvas.height = video.offsetHeight
ctx.lineWidth = 2
ctx.strokeStyle = ctx.fillStyle = 'red'
ctx.font = "30px Arial"
canvas._done = true
}
//Start classifying objects found in video.
model.detect(video).then( (predictions)=> {
//console.log( predictions.length )
//console.log( video.videoWidth + " x " + video.videoHeight )
//console.log( window.innerWidth + " vs " + video.offsetWidth )
ctx.clearRect(0, 0, canvas.width, canvas.height)
//Loop through predictions and draw them to the canvas.
for (let n = 0; n < predictions.length; n++) {
console.log( "object: " + predictions[n].score.toFixed(2) + ": " + predictions[n].class )
//If we are over 66% sure we know object type.
if( predictions[n].score > 0.66 ) {
//Draw bounding box.
ctx.beginPath()
var bbox = predictions[n].bbox
//console.log( "bbox: " + JSON.stringify(bbox) )
ctx.rect( bbox[0]/video.videoWidth*canvas.width, bbox[1]/video.videoHeight*canvas.height,
bbox[2]/video.videoWidth*canvas.width, bbox[3]/video.videoHeight*canvas.height )
ctx.stroke()
}
}
//Show actual frame rate.
var tnow = Date.now()
ctx.fillText( "fps: " + Math.round(1000/(tnow-tlast)), 10, 25 )
tlast = tnow
//Call this function again when the browser is ready.
window.requestAnimationFrame( Detect )
})
}
</script>
<body onload="OnLoad()">
<div class="frame">
<div id="container" class="layers">
<video id="video" autoplay playsinline style="width:90%;"></video>
</div>
<div class="layers">
<canvas id="canvas"></canvas>
</div>
</div>
</body>
</html>