I already have posted I am using colorbuffer from custom shader as I mentioned on the above code. it’s working perfectly fine but when I import GLTF model it’s not working I have done each and every method but nothing happened. Please give me a solution or is there any better approach because I don’t want to use raycast.
here is my color buffer script:
import * as pc from 'playcanvas';
export class ColorBufferPicker {
private app: pc.Application;
private canvas: HTMLCanvasElement;
private colorBuffer: pc.Texture;
private renderTarget: pc.RenderTarget;
private shader: pc.ShaderMaterial;
constructor(app: pc.Application, canvas: HTMLCanvasElement) {
this.app = app;
this.canvas = canvas;
// Create color buffer texture for depth encoding
this.colorBuffer = new pc.Texture(app.graphicsDevice, {
width: app.graphicsDevice.width,
height: app.graphicsDevice.height,
format: pc.PIXELFORMAT_R8_G8_B8_A8,
mipmaps: false
});
// Create render target
this.renderTarget = new pc.RenderTarget({
colorBuffer: this.colorBuffer,
depth: true
});
// Create depth shader
this.shader = this.createDepthShader();
}
private createDepthShader(): pc.ShaderMaterial {
return new pc.ShaderMaterial({
uniqueName: 'depthPickingShader',
attributes: { aPosition: pc.SEMANTIC_POSITION },
vertexGLSL: `
attribute vec3 aPosition;
uniform mat4 matrix_model;
uniform mat4 matrix_viewProjection;
uniform mat4 matrix_view;
uniform float uNearClip;
uniform float uFarClip;
varying float vNormalizedDepth;
void main(void) {
vec4 worldPosition = matrix_model * vec4(aPosition, 1.0);
vec4 viewPosition = matrix_view * worldPosition;
gl_Position = matrix_viewProjection * worldPosition;
// Linear depth in view space (positive)
float linearDepth = -viewPosition.z;
// Normalize depth to 0-1 range for better encoding
vNormalizedDepth = (linearDepth - uNearClip) / (uFarClip - uNearClip);
}
`,
fragmentGLSL: `
precision highp float;
varying float vNormalizedDepth;
// Improved float to RGBA encoding
vec4 float2vec4(float value) {
value = clamp(value, 0.0, 1.0);
const vec4 bitSh = vec4(256.0 * 256.0 * 256.0, 256.0 * 256.0, 256.0, 1.0);
const vec4 bitMsk = vec4(0.0, 1.0 / 256.0, 1.0 / 256.0, 1.0 / 256.0);
vec4 res = fract(value * bitSh);
res -= res.xxyz * bitMsk;
return res;
}
void main(void) {
gl_FragColor = float2vec4(vNormalizedDepth);
}
`
});
}
getWorldPos(event: pc.MouseEvent, camera: pc.Entity, targets: pc.Entity[], range?: number): pc.Vec3 | null {
if (!camera.camera || !targets || targets.length === 0) return null;
// Store original materials and render target - FIX: Handle null materials
const originalMaterials = targets.map(target => {
if (target && target.render && target.render.material) {
return target.render.material;
}
return null;
});
const origRT = camera.camera.renderTarget;
try {
// Set shader uniforms for depth range
const nearClip = camera.camera.nearClip;
const farClip = range || camera.camera.farClip;
this.shader.setParameter('uNearClip', nearClip);
this.shader.setParameter('uFarClip', farClip);
// Apply depth shader to all targets - FIX: Only apply to valid render components
targets.forEach(target => {
if (target && target.render) {
target.render.material = this.shader;
}
});
camera.camera.renderTarget = this.renderTarget;
this.app.render();
// Calculate pixel coordinates once
const rect = this.canvas.getBoundingClientRect();
const pixelX = Math.floor((event.x - rect.left) * (this.colorBuffer.width / this.canvas.clientWidth));
const pixelY = Math.floor((this.canvas.clientHeight - (event.y - rect.top)) * (this.colorBuffer.height / this.canvas.clientHeight));
// Read pixel data
const gl = (this.app.graphicsDevice as any).gl;
gl.bindFramebuffer(gl.FRAMEBUFFER, this.renderTarget.impl._glFrameBuffer);
const pixel = new Uint8Array(4);
gl.readPixels(pixelX, pixelY, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, pixel);
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
if (pixel[3] === 0) return null;
// Decode and convert to world position
const normalizedDepth = this.vec4ToFloat(pixel);
const linearDepth = nearClip + normalizedDepth * (farClip - nearClip);
return this.depthToWorldPosition(event, camera, linearDepth);
} finally {
// Restore original materials for all targets - FIX: Handle null materials properly
targets.forEach((target, index) => {
if (target && target.render) {
// Only restore if there was an original material
if (originalMaterials[index] !== null) {
target.render.material = originalMaterials[index];
} else {
// If there was no original material, set to null or create a default one
target.render.material = new pc.StandardMaterial();
}
}
});
camera.camera.renderTarget = origRT;
}
}
private vec4ToFloat(pixel: Uint8Array): number {
// Optimized RGBA to float decoding
const r = pixel[0] * (1.0 / (255.0 * 256.0 * 256.0 * 256.0));
const g = pixel[1] * (1.0 / (255.0 * 256.0 * 256.0));
const b = pixel[2] * (1.0 / (255.0 * 256.0));
const a = pixel[3] * (1.0 / 255.0);
return r + g + b + a;
}
private depthToWorldPosition(event: pc.MouseEvent, camera: pc.Entity, depth: number): pc.Vec3 {
const cam = camera.camera!;
// Calculate NDC coordinates
const rect = this.canvas.getBoundingClientRect();
const ndcX = ((event.x - rect.left) / this.canvas.clientWidth) * 2 - 1;
const ndcY = -(((event.y - rect.top) / this.canvas.clientHeight) * 2 - 1);
// Calculate view space position
const aspect = this.canvas.clientWidth / this.canvas.clientHeight;
const tanHalfFov = Math.tan((cam.fov * Math.PI / 180) * 0.5);
const viewX = ndcX * depth * tanHalfFov * aspect;
const viewY = ndcY * depth * tanHalfFov;
const viewPos = new pc.Vec3(viewX, viewY, -depth);
// Transform to world space
const invViewMatrix = new pc.Mat4().copy(cam.viewMatrix).invert();
const worldPos = new pc.Vec3();
invViewMatrix.transformPoint(viewPos, worldPos);
return worldPos;
}
}```
and here is how I call it in measurement presenter:
private onMouseDown = (e: MouseEvent): void => {
if (e.button !== 0) return; // Only left click
const panel = this.view.getMeasurementPanel();
if (panel?.contains(e.target as Node)) return;
// Temporarily disable camera
const activeCamera = cameraManager.getActiveCamera() as any;
if (activeCamera) {
activeCamera.temporarilyDisabled = true;
}
const pcMouseEvent = { x: e.clientX, y: e.clientY } as pc.MouseEvent;
const range = this.calculatePreciseRange();
console.log(`🎯 Using precise range: ${range.toFixed(1)}`);
// Use targets array instead of single plane
const point = this.colorBufferPicker.getWorldPos(pcMouseEvent, this.camera, this.targets, range);
if (point) {
// Snap to ground level (you might want to adjust this logic for multiple planes)
point.y = this.plane.getPosition().y;
console.log(`✅ World point: (${point.x.toFixed(3)}, ${point.y.toFixed(3)}, ${point.z.toFixed(3)})`);
this.createSphere(point);
const measurementCompleted = this.model.addPoint(point);
if (measurementCompleted) {
const newMeasurement = this.model.measurements[this.model.measurements.length - 1];
if (newMeasurement.points) {
this.view.drawMeasurement(
newMeasurement.points[0],
newMeasurement.points[1],
newMeasurement.distance
);
// Save to localStorage
this.saveMeasurementToStorage(newMeasurement);
}
}
}
// Re-enable camera
setTimeout(() => {
if (activeCamera) {
activeCamera.temporarilyDisabled = false;
}
}, 50);
};```
and here is main.ts where the import model gltf pass:
import * as pc from "playcanvas";
import { ImportExportManager } from "./managers/ImportExportManager";
import { ImportExportView } from "./views/ImportExportView";
import { ImportExportPresenter } from "./presenters/ImportExportPresenter";
import { InputManager } from "../src/managers/InputManager";
import { InputPresenter } from "./presenters/InputPresenter";
import { InputView } from "../src/views/InputView";
import { createEnvironment } from "./presenters/CreateEnvironment";
import { OrbitCamera } from "./cameras/OrbitCamera";
import { FlyCamera } from "./cameras/FlyCamera";
import { WalkCamera } from "./cameras/WalkCamera";
import { CameraManager } from "./managers/CameraManager";
import { CameraView } from "./views/CameraView"; // Add this import at the top
import { IManageable } from "./managers/IManageable";
import { MeasurementManager } from "./managers/MeasurementManager";
import { MeasurementView } from "./views/MeasurementView";
import { MeasurementPresenter } from "./presenters/MeasurementPresenter";
console.log("main.ts loaded");
// Set up the PlayCanvas application
const canvas = document.getElementById("application") as HTMLCanvasElement;
const app = new pc.Application(canvas, { graphicsDeviceOptions: {
antialias: true, // Good
alpha: false,
preserveDrawingBuffer: false,
powerPreference: "high-performance",
depth: true,
stencil: true,
}});
app.setCanvasResolution(pc.RESOLUTION_AUTO);
app.setCanvasFillMode(pc.FILLMODE_FILL_WINDOW);
app.start();
// Scene-wide ambient
app.scene.ambientLight = new pc.Color(0.3, 0.3, 0.3);
// --- Use environment.ts for environment setup ---
const { cameraEntity, ground, ground2 } = createEnvironment(app);
// const boxy = new pc.Entity("boxy");
// boxy.addComponent("render", { type: "box" });
// // ✅ Make sure material is assigned before creating MeasurementPresenter
// const boxyMaterial = new pc.StandardMaterial();
// boxyMaterial.diffuse = new pc.Color(0, 1, 0); // Green color
// boxyMaterial.update();
// if (boxy.render) {
// boxy.render.material = boxyMaterial;
// }
// boxy.setLocalScale(20, 1, 20);
// boxy.setPosition(14, 2, 2);
// app.root.addChild(boxy);
// Create ground3 before loading the model
const ground3 = new pc.Entity("Ground 3");
ground3.addComponent("render", { type: "box" });
ground3.setLocalScale(20, 1, 20); // Same as working project
ground3.setPosition(0, 0, 10);
const boxyMaterial = new pc.StandardMaterial();
boxyMaterial.diffuse = new pc.Color(0, 1, 0); // Green color
boxyMaterial.update();
if (ground3.render) {
ground3.render.material = boxyMaterial;
}
app.root.addChild(ground3);
// Instantiate managers
export const inputManager = new InputManager();
export const inputPresenter = new InputPresenter(inputManager);
export const inputView = new InputView(inputPresenter);
inputView.initialize();
const importExportModel = new ImportExportManager(app);
const importExportView = new ImportExportView("importExportUI"); // Make sure this container exists in your HTML
const importExportPresenter = new ImportExportPresenter(importExportModel, importExportView);
importExportModel.awake();
const orbitCamera = new OrbitCamera(app, cameraEntity);
const flyCamera = new FlyCamera(app, cameraEntity);
const walkCamera = new WalkCamera(app, cameraEntity);
export const cameraManager = new CameraManager(
app,
[orbitCamera, flyCamera, walkCamera],
orbitCamera
);
app.on("update", (dt) => {
cameraManager.update(dt);
});
// Camera management
const cameraManagers = [orbitCamera, flyCamera, walkCamera];
const switchCameraManager = (mgr: IManageable) => {
cameraManager.switchTo(mgr);
};
new CameraView("managersUI", cameraManagers, switchCameraManager);
// Optionally load an initial model from a direct URL
importExportModel.importModel("/models/2CylinderEngine/2CylinderEngine.gltf", "2CylinderEngine.gltf");
const measurementModel = new MeasurementManager();
const measurementView = new MeasurementView(app, "measurementUI");
// Load a GLTF model and add it as an entity with render type 'mesh'
let importedModelEntity: pc.Entity | null = null;
app.assets.loadFromUrl("/models/2CylinderEngine/2CylinderEngine.gltf", "container", (err, asset) => {
if (err) {
console.error("Failed to load GLTF:", err);
return;
}
if (!asset) {
console.error("Asset is undefined.");
return;
}
const container = asset.resource as pc.ContainerResource;
importedModelEntity = container.instantiateRenderEntity();
importedModelEntity.setLocalScale(0.01, 0.01, 0.01);
importedModelEntity.setPosition(0, 0, 4);
// No need to add a render component manually; GLTF import already adds mesh renderers
app.root.addChild(importedModelEntity);
// Pass importedModelEntity as the last argument (boxy) to MeasurementPresenter
new MeasurementPresenter(
measurementModel,
measurementView,
app,
cameraEntity,
ground,
ground2,
importedModelEntity // <--- This is your GLTF model
);
});