How to map result coordinates to Video Texture coordinates

I am using

  1. Blazeface Tensorflow model for face tracking.
  2. Video Texture to show the video.

I’m getting the face landmark coordinates and I’m unable to map them with the video texture.

// Runs Blazeface on the current <video> frame and stores the first
// detected face in the module-level `face` variable (undefined when
// no face is found). Called fire-and-forget from the render loop, so
// errors are caught here instead of becoming unhandled rejections.
async function getFace() {
    // `model` holds the (possibly still-loading) Blazeface model promise;
    // bail out until it has been assigned.
    if (model === undefined) {
        return;
    }
    try {
        const res = await model;
        // Second argument (returnTensors = false) makes estimateFaces
        // return plain JS arrays/numbers instead of tensors.
        const pred = await res.estimateFaces(document.querySelector("video"), false);
        face = pred.length === 0 ? undefined : pred[0];
    } catch (err) {
        // Don't leave a stale face around after a failed inference pass.
        face = undefined;
        console.error("Blazeface estimateFaces failed", err);
    }
}
// update code called every frame
// Per-frame update: uploads the current video frame to the texture,
// kicks off asynchronous face detection, and — once a face is known —
// checks whether the ball overlaps the detected face region to trigger
// a tap on the ball script.
VideoTexture.prototype.update = function(dt) {
    if (this.mVideoTexture) {
        // Push the latest <video> frame into the GPU texture and
        // re-apply it as the material's emissive map.
        this.mVideoTexture.upload();
        const material = this.entity.model.meshInstances[0].material;
        material.emissiveMap = this.mVideoTexture;
        material.update();

        // Fire-and-forget: getFace() updates the module-level `face`
        // asynchronously; this frame simply reads the latest result.
        getFace();
    }

    // Guard clause: nothing to do until a face has been detected.
    if (face === undefined) {
        return;
    }

    // Blazeface returns landmark coordinates in *video-element pixel*
    // space; topRight is derived from bottomRight.x / topLeft.y.
    const topLeft = face.topLeft;
    const topRight = [face.bottomRight[0], face.topLeft[1]];

    // NOTE(review): screenToWorld expects *canvas/screen pixel* input,
    // while Blazeface coordinates are in video pixels. The video
    // resolution must be rescaled to the canvas resolution before this
    // call — this mismatch is the likely cause of the mapping problem.
    // TODO confirm video vs. canvas dimensions.
    const cam = this.arCamera.camera;
    const tlcords = cam.screenToWorld(topLeft[0], topLeft[1], cam.farClip);
    const trcords = cam.screenToWorld(topRight[0], topRight[1], cam.farClip);

    const ball = this.app.root.findByTag("ball")[0];
    const ballPos = ball.getPosition();
    // NOTE(review): getPosition() already returns *world* coordinates;
    // feeding them into screenToWorld mixes coordinate spaces — verify
    // whether a worldToScreen conversion was intended here instead.
    const ballCords = cam.screenToWorld(ballPos.x, ballPos.y, cam.farClip);

    console.log(ballCords.x, ballCords.y, tlcords.y);

    // The X test negates ballCords.x because the projected face X axis is
    // mirrored relative to world space. NOTE(review): the Y test compares
    // a screenToWorld result (trcords.y) against a raw world position
    // (ballPos.y) — exact float equality across mixed spaces will almost
    // never hold; a tolerance/range check is probably needed. TODO.
    if ((ballCords.x * -1 > tlcords.x && ballCords.x * -1 < trcords.x) &&
        trcords.y === ballPos.y) {
        console.log("Tap Now");
        this.ball.script.ball.tap(ballCords.x, ballCords.y);
    }
};

Check out the editor
https://playcanvas.com/editor/scene/1392203

Hi @saurabh_kumbhar and welcome,

Can you post some pictures of the issue?

I am running your sample, I can see my video stream from my web camera on the background and the ball model on top. My video stream is a bit distorted on the X axis, that’s the only issue I visually see.

Hello @Leonidas
Thanks for the reply.
I'm using TensorFlow's Blazeface model for detecting face landmarks,
and a video texture to display the video.
I'm unable to locate the face landmarks on the video texture.

Basically I want to map the face landmarks coordinates from the blazeface model to video texture

https://playcanvas.com/editor/project/921479