I am using:
- the Blazeface TensorFlow model for face tracking
- a Video Texture to show the video
I’m getting the face landmark coordinates, but I’m unable to map them onto the video texture.
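For context, here is the shape of what estimateFaces gives back — as far as I can tell the coordinates are pixels in the video's own resolution (video.videoWidth × video.videoHeight), not canvas or world units:

// inside an async function, with `model` already loaded:
var res = await model;
var pred = await res.estimateFaces(document.querySelector("video"), false);
// pred[0] looks like:
// {
//   topLeft: [x, y],            // face box corner, in video pixels
//   bottomRight: [x, y],
//   landmarks: [[x, y], ...],   // eyes, ears, nose, mouth
//   probability: [p]            // detection confidence
// }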
async function getFace() {
    if (model !== undefined) {
        // model is a promise, so resolve it to the actual Blazeface model first
        var res = await model;
        // second argument (returnTensors = false) gives plain arrays, not tensors
        var pred = await res.estimateFaces(document.querySelector("video"), false);
        if (pred.length === 0) {
            face = undefined;
        } else {
            face = pred[0];
        }
    }
}
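The globals used above live at script scope and are set up roughly like this (assuming blazeface and tfjs come in via script tags):

var model = blazeface.load(); // returns a Promise<BlazeFaceModel>
var face;                     // latest prediction, updated by getFace()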
// update, called every frame
VideoTexture.prototype.update = function (dt) {
    if (this.mVideoTexture) {
        // push the current video frame to the GPU and onto the screen material
        this.mVideoTexture.upload();
        const material = this.entity.model.meshInstances[0].material;
        material.emissiveMap = this.mVideoTexture;
        material.update();
        getFace(); // async, fire-and-forget; `face` updates once the promise resolves
    }
    if (face !== undefined) {
        // the plane that the video texture is rendered onto
        var mVideoTexScreen = this.app.root.findByTag("mVideoTex")[0];
        var scales = mVideoTexScreen.getScale();
        var width = scales.x;   // plane is scaled on X/Z
        var height = scales.z;

        // face box corners, in video-pixel space
        var topLeft = face.topLeft;
        var topRight = [face.bottomRight[0], face.topLeft[1]];

        // debug marker for the projected corner
        var sphereEntity = new pc.Entity();
        sphereEntity.addComponent("render", {
            type: 'sphere'
        });
        this.app.root.addChild(sphereEntity); // without this the sphere never renders
        // (note: this creates a new entity every frame)

        // NOTE: screenToWorld expects canvas-pixel coordinates, but topLeft/topRight
        // are video-pixel coordinates — I suspect this is where the mapping breaks
        var tlcords = this.arCamera.camera.screenToWorld(topLeft[0], topLeft[1], this.arCamera.camera.farClip);
        var trcords = this.arCamera.camera.screenToWorld(topRight[0], topRight[1], this.arCamera.camera.farClip);
        sphereEntity.setPosition(tlcords.x, tlcords.y, 1);

        var ball = this.app.root.findByTag("ball");
        // NOTE: getPosition() returns world space, yet the values go into screenToWorld below
        var ballX = ball[0].getPosition().x;
        var ballY = ball[0].getPosition().y;
        var ballCords = this.arCamera.camera.screenToWorld(ballX, ballY, this.arCamera.camera.farClip);
        console.log(ballCords.x, ballCords.y, tlcords.y);
        if ((ballCords.x * -1 > tlcords.x && ballCords.x * -1 < trcords.x) &&
            trcords.y == ballY) {
            // NOTE: exact float equality on y will almost never be true;
            // this probably needs a range/tolerance check instead
            // this.app.scripts.input._onTap(ballX, ballY);
            console.log("Tap Now");
            this.ball.script.ball.tap(ballCords.x, ballCords.y);
        }
    }
};
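My best guess at the underlying problem: the Blazeface box is in video-pixel space, screenToWorld expects canvas-pixel space, and the ball position is already in world space, so the comparison mixes three coordinate systems. Below is a sketch of the conversion I think is needed — it assumes the video plane is centred at the origin, faces the camera, and the video fills it exactly (names mirror the code above):

// Map a Blazeface point (video pixels) into the video plane's world space.
function videoToWorld(px, py, video, planeEntity) {
    var scale = planeEntity.getScale();
    var width = scale.x;    // plane scaled on X/Z, as above
    var height = scale.z;

    // normalise to 0..1 in video space
    var u = px / video.videoWidth;
    var v = py / video.videoHeight;

    // centre on the plane and flip y (video y grows downwards, world y upwards);
    // if the video is mirrored, negate x here too (cf. the *-1 above)
    return new pc.Vec2((u - 0.5) * width, (0.5 - v) * height);
}

// Then the face box and the ball share world space, and no screenToWorld is needed:
var video = document.querySelector("video");
var plane = this.app.root.findByTag("mVideoTex")[0];
var tl = videoToWorld(face.topLeft[0], face.topLeft[1], video, plane);
var br = videoToWorld(face.bottomRight[0], face.bottomRight[1], video, plane);
var ballPos = this.app.root.findByTag("ball")[0].getPosition();
if (ballPos.x > tl.x && ballPos.x < br.x &&
    ballPos.y < tl.y && ballPos.y > br.y) {
    console.log("Tap Now");
}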
Check out the editor
https://playcanvas.com/editor/scene/1392203