Is it possible to make a voxel mesher — basically a converter: if any mesh exists in the scene, you run it, preview the model, and it outputs a voxelized, blocky version of that mesh?
or maybe a shader
Any help please
still waiting
Have you tried Chat GPT? It’s basically suggesting it will help you:
Yes, it’s absolutely possible to create a voxel mesher/converter in PlayCanvas that takes any mesh in the scene and generates a voxelized, blocky version of it. This would essentially be a tool or component that:
- Accesses the mesh data (vertices/triangles)
- Samples or voxelizes the geometry into a 3D grid
- Generates new geometry made up of cubes (voxels)
- Displays the voxel mesh in the scene for previewing or saving
Core Concept:
You’re converting an arbitrary mesh into a voxel representation by “filling” the mesh volume (or surface) with cubes of a certain resolution.
Steps to Build This in PlayCanvas
- Access the Mesh Instance:
javascript
CopyEdit
const meshInstances = entity.model.meshInstances;
const mesh = meshInstances[0].mesh;
- Voxelization Algorithm:
You’ll want to scan a 3D grid (think: x, y, z loops) across the bounding box of the mesh, and for each voxel cell:
- Sample a point at its center
- Use raycasting or point-in-mesh testing to see if it intersects the original mesh. For surface-only voxelization, you could also:
- Iterate over triangles of the mesh
- Fill voxel cells that overlap/intersect the triangle
- Generate Voxel Cubes (Meshes):
For each filled voxel cell, instantiate a cube (or better, batch geometry together for performance). Example using PlayCanvas primitives:
javascript
CopyEdit
const box = new pc.Entity();
box.addComponent("model", {
type: "box"
});
box.setLocalScale(voxelSize, voxelSize, voxelSize);
box.setPosition(x, y, z);
parent.addChild(box);
Or better yet, dynamically generate a combined mesh of all voxels using PlayCanvas’ pc.createMesh()
API.
4. Add Controls/Preview:
- Let the user select an entity
- Add a UI for voxel resolution (size of voxels)
- Show/hide voxelized vs original mesh
Optimization Tips
- Greedy meshing: Merge adjacent cubes into larger boxes to reduce draw calls
- Surface voxelization: Only voxelize visible surfaces to reduce voxel count
- Use Instancing or GPU methods for better performance on large meshes
Libraries or Inspiration
- MagicaVoxel — inspiration for voxel look
- THREE.js voxelizer — porting ideas to PlayCanvas
- Tools like MeshVoxelizer in Unity can inspire your structure
Runtime or Editor Tool?
You can build this as:
- An in-editor PlayCanvas script: Users press a button and get a voxelized preview
- A runtime feature: Voxelize any mesh in real time (more costly)
Want a Starter Example?
Let me know if you’d like a minimal working PlayCanvas script that:
- Loads a mesh
- Voxelizes it to cubes at a given resolution
- Displays voxel preview
I can write it for you.
okay thanks so much, yea sure I want a starter example
yes please you can write it
well I tried ```var Voxelizer = pc.createScript('voxelizer');
// Editor-exposed grid resolution: number of voxel cells per axis across the mesh bounds.
// BUGFIX: the pasted code used curly "smart" quotes (‘…’), which are not valid
// JavaScript string delimiters — replaced with straight quotes.
Voxelizer.attributes.add('voxelResolution', { type: 'number', default: 16, min: 4, max: 64 });
/**
 * Reads vertex positions and triangle indices out of the entity's first mesh
 * instance, records the mesh bounds, and kicks off voxelization.
 * Requires the entity to have a model component with at least one meshInstance.
 */
Voxelizer.prototype.initialize = function () {
    // BUGFIX: curly quotes in the original error strings were invalid JS syntax.
    if (!this.entity.model) {
        console.error('Voxelizer requires the entity to have a model component.');
        return;
    }
    var meshInstances = this.entity.model.meshInstances;
    if (!meshInstances || meshInstances.length === 0) {
        console.error('No meshInstances found on model.');
        return;
    }

    // Use first meshInstance for voxelization
    var meshInstance = meshInstances[0];
    var mesh = meshInstance.mesh;
    if (!mesh) {
        console.error('Mesh not found.');
        return;
    }

    // Locate the position element within the (possibly interleaved) vertex format.
    var vertexBuffer = mesh.vertexBuffer;
    var vertexFormat = vertexBuffer.getFormat();
    var posElement = null;
    for (var i = 0; i < vertexFormat.elements.length; i++) {
        if (vertexFormat.elements[i].name === pc.SEMANTIC_POSITION) {
            posElement = vertexFormat.elements[i];
            break;
        }
    }
    if (!posElement) {
        console.error('Position semantic not found in vertex buffer.');
        return;
    }

    // Extract vertex positions. Offsets/strides are in bytes; divide by 4 to
    // index into the Float32 view.
    var vertexCount = vertexBuffer.getNumVertices();
    var vertexData = new Float32Array(vertexBuffer.lock());
    var positions = [];
    var posOffset = posElement.offset / 4;  // byte offset -> float offset
    var stride = vertexFormat.size / 4;     // byte stride -> float stride
    for (var v = 0; v < vertexCount; v++) {
        var idx = v * stride + posOffset;
        positions.push(new pc.Vec3(
            vertexData[idx],
            vertexData[idx + 1],
            vertexData[idx + 2]
        ));
    }
    vertexBuffer.unlock();

    // Extract triangle indices (16- or 32-bit formats supported).
    var indexBuffer = mesh.indexBuffer[0];
    if (!indexBuffer) {
        console.error('Mesh has no index buffer.');
        return;
    }
    var indices;
    if (indexBuffer.format === pc.INDEXFORMAT_UINT16) {
        indices = Array.from(new Uint16Array(indexBuffer.lock()));
        indexBuffer.unlock();
    } else if (indexBuffer.format === pc.INDEXFORMAT_UINT32) {
        indices = Array.from(new Uint32Array(indexBuffer.lock()));
        indexBuffer.unlock();
    } else {
        console.error('Unsupported index buffer format.');
        return;
    }

    // BUGFIX: prefer the mesh's own AABB over meshInstance.aabb.
    // meshInstance.aabb is in WORLD space, but the positions read above are raw
    // local-space vertex data, so a world-space grid would not line up with the
    // triangles being tested. Fall back to the instance AABB only if needed.
    var aabb = mesh.aabb || meshInstance.aabb;
    if (!aabb) {
        console.error('Mesh AABB not found.');
        return;
    }
    this.boundsMin = aabb.getMin();
    this.boundsMax = aabb.getMax();
    this.positions = positions;
    this.indices = indices;
    this.voxelize();
};
/**
 * Fills this.voxelData — a resolution³ boolean grid spanning the mesh bounds —
 * by testing the centre of every cell against the source mesh, then triggers
 * mesh generation.
 */
Voxelizer.prototype.voxelize = function () {
    // Per-axis cell size so the grid exactly spans the mesh bounding box.
    this.voxelSize = new pc.Vec3(
        (this.boundsMax.x - this.boundsMin.x) / this.voxelResolution,
        (this.boundsMax.y - this.boundsMin.y) / this.voxelResolution,
        (this.boundsMax.z - this.boundsMin.z) / this.voxelResolution
    );

    // PERF: reuse a single scratch vector for every cell-centre test instead
    // of allocating resolution^3 pc.Vec3 instances (a large GC win at 64^3).
    // Safe because isPointInsideMesh clones the point before using it.
    var samplePoint = new pc.Vec3();

    this.voxelData = [];
    for (var x = 0; x < this.voxelResolution; x++) {
        this.voxelData[x] = [];
        for (var y = 0; y < this.voxelResolution; y++) {
            this.voxelData[x][y] = [];
            for (var z = 0; z < this.voxelResolution; z++) {
                // Centre of cell (x, y, z) in mesh-local space.
                samplePoint.set(
                    this.boundsMin.x + (x + 0.5) * this.voxelSize.x,
                    this.boundsMin.y + (y + 0.5) * this.voxelSize.y,
                    this.boundsMin.z + (z + 0.5) * this.voxelSize.z
                );
                this.voxelData[x][y][z] = this.isPointInsideMesh(samplePoint, this.positions, this.indices);
            }
        }
    }
    this.generateMesh();
};
/**
 * Parity (crossing-number) inside/outside test: cast a ray along +X from the
 * sample point and count triangle crossings — an odd count means "inside".
 * Note: a single fixed ray direction can miscount on exact edge/vertex grazes.
 */
Voxelizer.prototype.isPointInsideMesh = function (point, positions, indices) {
    var rayStart = point.clone();
    var rayDirection = new pc.Vec3(1, 0, 0);
    var crossings = 0;
    var total = indices.length;

    for (var t = 0; t < total; t += 3) {
        var a = indices[t];
        var b = indices[t + 1];
        var c = indices[t + 2];

        // Skip triangles whose indices fall outside the vertex array.
        if (a < 0 || b < 0 || c < 0 ||
            a >= positions.length || b >= positions.length || c >= positions.length) {
            continue;
        }

        var pa = positions[a];
        var pb = positions[b];
        var pcorner = positions[c];
        if (!pa || !pb || !pcorner) continue;

        if (this.rayIntersectsTriangle(rayStart, rayDirection, pa, pb, pcorner)) {
            crossings++;
        }
    }
    return crossings % 2 === 1;
};
/**
 * Möller–Trumbore ray/triangle intersection, written out component-wise so no
 * temporary pc.Vec3 objects are allocated per call. Returns true when the ray
 * (origin, dir) hits triangle (v0, v1, v2) strictly in front of the origin.
 */
Voxelizer.prototype.rayIntersectsTriangle = function (origin, dir, v0, v1, v2) {
    const EPSILON = 1e-8;

    // Triangle edges from v0.
    const e1x = v1.x - v0.x, e1y = v1.y - v0.y, e1z = v1.z - v0.z;
    const e2x = v2.x - v0.x, e2y = v2.y - v0.y, e2z = v2.z - v0.z;

    // h = dir × e2
    const hx = dir.y * e2z - dir.z * e2y;
    const hy = dir.z * e2x - dir.x * e2z;
    const hz = dir.x * e2y - dir.y * e2x;

    // a = e1 · h — near zero means the ray is parallel to the triangle plane.
    const a = e1x * hx + e1y * hy + e1z * hz;
    if (a > -EPSILON && a < EPSILON) return false;

    const f = 1.0 / a;

    // s = origin − v0
    const sx = origin.x - v0.x, sy = origin.y - v0.y, sz = origin.z - v0.z;

    // Barycentric u must lie in [0, 1].
    const u = f * (sx * hx + sy * hy + sz * hz);
    if (u < 0.0 || u > 1.0) return false;

    // q = s × e1
    const qx = sy * e1z - sz * e1y;
    const qy = sz * e1x - sx * e1z;
    const qz = sx * e1y - sy * e1x;

    // Barycentric v, and u + v must not exceed 1.
    const v = f * (dir.x * qx + dir.y * qy + dir.z * qz);
    if (v < 0.0 || u + v > 1.0) return false;

    // t > EPSILON: intersection lies ahead of the ray origin.
    const t = f * (e2x * qx + e2y * qy + e2z * qz);
    return t > EPSILON;
};
/**
 * Builds a single batched cube mesh from all filled cells in this.voxelData
 * and attaches it to the scene root as a new 'VoxelMesh' entity, replacing any
 * previous preview.
 */
Voxelizer.prototype.generateMesh = function () {
    // Rebuild from scratch: drop any previous voxel preview entity.
    if (this.voxelEntity) {
        this.voxelEntity.destroy();
        this.voxelEntity = null;
    }
    this.voxelEntity = new pc.Entity('VoxelMesh');

    var positions = [];
    var indices = [];
    var vertexOffset = 0;

    for (var x = 0; x < this.voxelResolution; x++) {
        for (var y = 0; y < this.voxelResolution; y++) {
            for (var z = 0; z < this.voxelResolution; z++) {
                if (!this.voxelData[x][y][z]) continue;
                // Cube centred on this filled cell.
                var center = new pc.Vec3(
                    this.boundsMin.x + x * this.voxelSize.x + this.voxelSize.x / 2,
                    this.boundsMin.y + y * this.voxelSize.y + this.voxelSize.y / 2,
                    this.boundsMin.z + z * this.voxelSize.z + this.voxelSize.z / 2
                );
                positions.push(...this.getCubeVertices(center, this.voxelSize));
                indices.push(...this.getCubeIndices(vertexOffset));
                vertexOffset += 8;  // 8 corners per cube
            }
        }
    }

    if (positions.length === 0) {
        console.warn('No voxels generated.');
        return;
    }

    const device = this.app.graphicsDevice;
    const vertexFormat = new pc.VertexFormat(device, [
        { semantic: pc.SEMANTIC_POSITION, components: 3, type: pc.TYPE_FLOAT32 }
    ]);
    const vertexCount = positions.length / 3;
    const vertexBuffer = new pc.VertexBuffer(device, vertexFormat, vertexCount);
    new Float32Array(vertexBuffer.lock()).set(positions);
    vertexBuffer.unlock();

    // BUGFIX: a 16-bit index buffer overflows once there are more than 65535
    // vertices (> 8191 voxels), silently corrupting the mesh. Switch to 32-bit
    // indices when needed (on WebGL1 this relies on OES_element_index_uint).
    const use32BitIndices = vertexCount > 65535;
    const indexBuffer = new pc.IndexBuffer(
        device,
        use32BitIndices ? pc.INDEXFORMAT_UINT32 : pc.INDEXFORMAT_UINT16,
        indices.length
    );
    const indexData = use32BitIndices
        ? new Uint32Array(indexBuffer.lock())
        : new Uint16Array(indexBuffer.lock());
    indexData.set(indices);
    indexBuffer.unlock();

    const mesh = new pc.Mesh();
    mesh.vertexBuffer = vertexBuffer;
    mesh.indexBuffer[0] = indexBuffer;
    mesh.primitive[0].type = pc.PRIMITIVE_TRIANGLES;
    mesh.primitive[0].base = 0;
    mesh.primitive[0].count = indices.length;
    mesh.primitive[0].indexed = true;  // BUGFIX: mark the draw call as indexed
    // Give the mesh a bounding box so the engine can cull it correctly.
    mesh.aabb = new pc.BoundingBox();
    mesh.aabb.setMinMax(this.boundsMin, this.boundsMax);

    var material = new pc.StandardMaterial();
    material.diffuse.set(0, 1, 0);  // flat green preview material
    material.update();

    this.voxelEntity.addComponent('model');
    this.voxelEntity.model.meshInstances = [new pc.MeshInstance(mesh, material)];
    this.app.root.addChild(this.voxelEntity);
};
/**
 * Returns a flat [x,y,z, x,y,z, ...] array of the 8 corner positions of an
 * axis-aligned cube centred at `center` with per-axis extents `size`.
 * Corner order: the four z- corners first, then the four z+ corners — the
 * order getCubeIndices expects.
 */
Voxelizer.prototype.getCubeVertices = function (center, size) {
    // Each corner expressed as sign multipliers of the half-extents.
    var CORNER_SIGNS = [
        [-1, -1, -1], [1, -1, -1], [1, 1, -1], [-1, 1, -1],
        [-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1]
    ];
    var hx = size.x / 2;
    var hy = size.y / 2;
    var hz = size.z / 2;
    var verts = [];
    for (var i = 0; i < CORNER_SIGNS.length; i++) {
        verts.push(
            center.x + CORNER_SIGNS[i][0] * hx,
            center.y + CORNER_SIGNS[i][1] * hy,
            center.z + CORNER_SIGNS[i][2] * hz
        );
    }
    return verts;
};
/**
 * Returns the 36 triangle indices (12 triangles, 2 per face) for one cube,
 * expressed against the corner layout produced by getCubeVertices and shifted
 * by the caller's running vertex offset.
 */
Voxelizer.prototype.getCubeIndices = function (offset) {
    // Corner numbers 0-7, grouped two triangles per face.
    var CUBE_TRIANGLES = [
        0, 1, 2, 0, 2, 3, // z- face
        1, 5, 6, 1, 6, 2, // x+ face
        5, 4, 7, 5, 7, 6, // z+ face
        4, 0, 3, 4, 3, 7, // x- face
        3, 2, 6, 3, 6, 7, // y+ face
        4, 1, 0, 4, 5, 1  // y- face
    ];
    return CUBE_TRIANGLES.map(function (corner) {
        return corner + offset;
    });
};
Just to be clear. AI said that. Not me
Oh
Still confused
Just tried shaders, I asked ChatGPT for help with this as I am still learning GLSL.
First, create 2 shader assets, one called pixelate.frag (this will be your fragment part) and pixelate.vert (this will be your vertex part)
For pixelate.frag
// pixelate.frag — fullscreen pixelation pass: quantise screen UVs into
// uPixelSize-wide blocks so every fragment in a block samples the same texel.
precision mediump float;
uniform sampler2D uColorBuffer; // the rendered scene, supplied by the post effect
uniform float uPixelSize;       // block size in UV units (1.0 / blocks across screen)
varying vec2 vUv0;              // screen-space UV from pixelate.vert
void main(void) {
    // Snap UVs to a grid (samples the corner of each block, not its centre)
    vec2 blockUV = floor(vUv0 / uPixelSize) * uPixelSize;
    gl_FragColor = texture2D(uColorBuffer, blockUV);
}
- `uColorBuffer` is the rendered scene, passed in as a texture.
- `vUv0` is the exact UV (like “pixel 312, 548”) we’d normally sample.
- `floor(vUv0 / uPixelSize)` → snaps the UV to a grid coordinate (like “block #14”).
- Multiplying back by `uPixelSize` → turns that into a UV that samples the top-left corner of that block.
- Every pixel in that block uses the same color → result: chunky pixelation.
So the fragment shader is faking a low-resolution sample and then stretching it up.
For pixelate.vert:
// pixelate.vert — passes the fullscreen quad through unchanged and derives
// [0,1] screen UVs for the fragment shader.
attribute vec2 aPosition; // fullscreen-quad corner in clip space ([-1, 1])
varying vec2 vUv0;        // resulting screen-space UV for the fragment stage
void main(void) {
    gl_Position = vec4(aPosition, 0.0, 1.0);
    // Convert from [-1,1] clip space to [0,1] UV space
    vUv0 = (aPosition + 1.0) * 0.5;
}
- `aPosition` → the corners of a fullscreen quad (usually in clip space from -1 to +1).
- We pass them through as `gl_Position` to draw across the whole screen.
- We also map them into the `[0, 1]` range → that becomes our UV coordinates (`vUv0`).
- Think of it as: “where on the screen am I?” so the fragment shader can sample the right place in the texture.
So the vertex shader is just setting up coordinates for the fragment shader to use.
Now for the actual script:
/**
 * Post effect that re-renders the scene through the pixelation shader pair.
 * pc.PostEffect supplies this.device and this.vertexBuffer (fullscreen quad).
 */
class PixelateEffect extends pc.PostEffect {
    /**
     * @param {pc.GraphicsDevice} graphicsDevice - Device used to compile the shader.
     * @param {string} vs - Vertex shader source (fullscreen quad passthrough).
     * @param {string} fs - Fragment shader source (UV-snapping pixelation).
     */
    constructor(graphicsDevice, vs, fs) {
        super(graphicsDevice);

        // Compile the shader pair once, up front.
        this.shader = new pc.Shader(graphicsDevice, {
            attributes: { aPosition: pc.SEMANTIC_POSITION },
            vshader: vs,
            fshader: fs
        });

        // Block size in UV units; overwritten later by the Pixelate script.
        this.pixelSize = 1.0 / 100.0;
    }

    render(inputTarget, outputTarget, rect) {
        const scope = this.device.scope;

        // Feed the shader its uniforms: block size and the scene colour buffer.
        scope.resolve("uPixelSize").setValue(this.pixelSize);
        scope.resolve("uColorBuffer").setValue(inputTarget.colorBuffer);

        // Draw the fullscreen quad through our shader into the output target.
        pc.drawFullscreenQuad(this.device, outputTarget, this.vertexBuffer, this.shader, rect);
    }
}
// Camera script that wires the PixelateEffect into the post-processing stack.
var Pixelate = pc.createScript('pixelate');
// Shaders (assign pixelate.vert / pixelate.frag assets in the Editor)
Pixelate.attributes.add('vs', {
    type: 'asset',
    assetType: 'shader',
    title: 'Vertex Shader'
});
Pixelate.attributes.add('fs', {
    type: 'asset',
    assetType: 'shader',
    title: 'Fragment Shader'
});
// Add attribute for resolution (controls blockiness; higher = finer blocks)
Pixelate.attributes.add('blockResolution', {
    type: 'number',
    default: 100,
    title: 'Blocks Across Screen'
});
/**
 * Builds the pixelation effect from the assigned shader assets and attaches
 * it to this entity's camera post-effect queue. Expects the script to live on
 * an entity with a camera component.
 */
Pixelate.prototype.initialize = function () {
    // Guard: both shader assets must be assigned (and loaded) in the Editor.
    if (!this.vs || !this.fs || !this.vs.resource || !this.fs.resource) {
        console.error('Pixelate: vertex and fragment shader assets must be assigned.');
        return;
    }

    const effect = new PixelateEffect(this.app.graphicsDevice, this.vs.resource, this.fs.resource);

    // Convert "blocks across the screen" into a block size in UV space.
    // Clamp to >= 1 so a zero/negative attribute can't produce a broken
    // (infinite/negative) pixel size. (Removed an unused screenWidth local.)
    effect.pixelSize = 1.0 / Math.max(1, this.blockResolution);

    const queue = this.entity.camera.postEffects;
    queue.addEffect(effect);

    // Keep the effect in sync with the script's enabled state.
    this.on('enable', () => queue.addEffect(effect, false));
    this.on('disable', () => queue.removeEffect(effect));

    // Save effect for later tweaking
    this.effect = effect;
};
- You expose `blockResolution` to the Editor so you can tweak it (e.g. 50, 100, 200).
- You convert that into `pixelSize` (in UV space), which the shader needs.
- You attach the effect to the camera’s post-processing stack, so it applies after the scene is rendered.
So the script is the bridge: it takes your editor values, sets up the shader, and tells PlayCanvas when to use it.
thx, looks good, but it’s pixely, not 3D blockish — what I’m saying is, try using raymarching or raycasting