I managed to work around the resolution-crushing issue by adding this code at the top of my screenshot code to dynamically inject “fixed” functions:
/**
 * Effective width of the surface this post-effect queue renders to.
 * Prefers the camera's own render target (e.g. a screenshot target);
 * falls back to the backbuffer width when no usable target exists.
 * @returns {number} Width in pixels.
 */
pc.PostEffectQueue.prototype.targetWidth = function () {
    let rt = this.camera.renderTarget;
    // Targets created internally by this queue are tagged isOffscreenTarget
    // (see _createOffscreenTarget); skip past them to the real destination.
    if (rt && rt.isOffscreenTarget) {
        rt = this.destinationRenderTarget;
    }
    // Bug fix: the original only checked `rt !== null` here, so an
    // `undefined` renderTarget (or destinationRenderTarget) fell through
    // to `rt.width` and threw. Treat null and undefined alike.
    return rt ? rt.width : this.app.graphicsDevice.width;
};
/**
 * Effective height of the surface this post-effect queue renders to.
 * Mirrors targetWidth: prefers the camera's render target, falling back
 * to the backbuffer height when no usable target exists.
 * @returns {number} Height in pixels.
 */
pc.PostEffectQueue.prototype.targetHeight = function () {
    let rt = this.camera.renderTarget;
    // Skip the queue's own intermediate targets and resolve the real
    // destination target instead.
    if (rt && rt.isOffscreenTarget) {
        rt = this.destinationRenderTarget;
    }
    // Bug fix: original checked only `rt !== null`, so an undefined target
    // crashed on `rt.height`. Guard against both null and undefined.
    return rt ? rt.height : this.app.graphicsDevice.height;
};
/**
 * Create the color texture backing a post-effect render target.
 * The texture is sized from the camera's normalized rect applied to the
 * effective target size (see the targetWidth/targetHeight overrides),
 * further scaled by renderTargetScale.
 * @param {number} format - Pixel format for the texture.
 * @param {string} name - Debug name assigned to the texture.
 * @returns {pc.Texture} The freshly allocated color buffer.
 */
pc.PostEffectQueue.prototype._allocateColorBuffer = function (format, name) {
    console.log('Allocating ColorBuffer: ' + name);

    const { z: rectW, w: rectH } = this.camera.rect;
    const bufferWidth = Math.floor(rectW * this.targetWidth() * this.renderTargetScale);
    const bufferHeight = Math.floor(rectH * this.targetHeight() * this.renderTargetScale);

    return new pc.Texture(this.app.graphicsDevice, {
        name,
        format,
        width: bufferWidth,
        height: bufferHeight,
        mipmaps: false,
        minFilter: pc.FILTER_NEAREST,
        magFilter: pc.FILTER_NEAREST,
        addressU: pc.ADDRESS_CLAMP_TO_EDGE,
        addressV: pc.ADDRESS_CLAMP_TO_EDGE
    });
};
/**
 * Build an offscreen render target for one post-effect in the chain.
 * The target is tagged with `isOffscreenTarget` so the patched
 * targetWidth/targetHeight can distinguish it from a real destination.
 * @param {boolean} useDepth - Whether the target needs a depth buffer.
 * @param {boolean} hdr - Use the device's HDR format instead of RGBA8.
 * @returns {pc.RenderTarget} The new render target.
 */
pc.PostEffectQueue.prototype._createOffscreenTarget = function (useDepth, hdr) {
    const device = this.app.graphicsDevice;

    const pixelFormat = hdr ? device.getHdrFormat() : pc.PIXELFORMAT_R8_G8_B8_A8;
    const bufferName = this.camera.entity.name + '-posteffect-' + this.effects.length;

    const target = new pc.RenderTarget({
        colorBuffer: this._allocateColorBuffer(pixelFormat, bufferName),
        depth: useDepth,
        stencil: device.supportsStencil,
        // Only multisample when a depth buffer is requested.
        samples: useDepth ? device.samples : 1
    });
    target.isOffscreenTarget = true;
    return target;
};
Basically all this does is check for an existing renderTarget and use that size if one is found. Otherwise we default to the device size.
The issue I’m running into now is that the SSAO script draws the added shadows way too large on the screenshot:
I also made these changes to the posteffect-ssao.js script from GitHub:
// // Render targets
// let width = graphicsDevice.width;
// let height = graphicsDevice.height;
// // var width = graphicsDevice.width;
// // var height = graphicsDevice.height;
// var colorBuffer = new pc.Texture(graphicsDevice, {
// format: pc.PIXELFORMAT_R8_G8_B8_A8,
// minFilter: pc.FILTER_LINEAR,
// magFilter: pc.FILTER_LINEAR,
// addressU: pc.ADDRESS_CLAMP_TO_EDGE,
// addressV: pc.ADDRESS_CLAMP_TO_EDGE,
// width: width,
// height: height,
// mipmaps: false
// });
// colorBuffer.name = 'ssao';
// this.target = new pc.RenderTarget({
// colorBuffer: colorBuffer,
// depth: false
// });
// Uniforms
this.radius = 4;
this.brightness = 0;
this.samples = 20;
}
// Standard prototypal inheritance: SSAOEffect derives from pc.PostEffect.
SSAOEffect.prototype = Object.create(pc.PostEffect.prototype);
// Restore the constructor reference clobbered by Object.create above.
SSAOEffect.prototype.constructor = SSAOEffect;
Object.assign(SSAOEffect.prototype, {
    /**
     * (Re)create the intermediate SSAO target whenever the input target's
     * size changes, so the AO pass matches the source resolution rather than
     * the device resolution.
     * @param {pc.RenderTarget} inputTarget - Target whose size to match.
     */
    resizeRenderTarget: function (inputTarget) {
        const width = inputTarget.width;
        const height = inputTarget.height;

        // Nothing to do if the current target already matches.
        if (this.target !== undefined &&
            this.target.width === width &&
            this.target.height === height) {
            return;
        }

        // Bug fix: the original simply overwrote this.target, leaking one
        // RenderTarget + Texture per resize. Destroy both explicitly
        // (RenderTarget.destroy does not destroy its color buffer).
        if (this.target !== undefined) {
            const oldColorBuffer = this.target.colorBuffer;
            this.target.destroy();
            if (oldColorBuffer) {
                oldColorBuffer.destroy();
            }
        }

        // Consistency fix: use this.device (set by the pc.PostEffect
        // constructor and already used by render) instead of
        // pc.app.graphicsDevice, which is not guaranteed to exist.
        const colorBuffer = new pc.Texture(this.device, {
            format: pc.PIXELFORMAT_R8_G8_B8_A8,
            minFilter: pc.FILTER_LINEAR,
            magFilter: pc.FILTER_LINEAR,
            addressU: pc.ADDRESS_CLAMP_TO_EDGE,
            addressV: pc.ADDRESS_CLAMP_TO_EDGE,
            width: width,
            height: height,
            mipmaps: false
        });
        colorBuffer.name = 'ssao';
        this.target = new pc.RenderTarget({
            colorBuffer: colorBuffer,
            depth: false
        });
    },

    /**
     * Run the SSAO pass into this.target, then the bilateral blur pass into
     * outputTarget, sampling the AO result.
     * @param {pc.RenderTarget} inputTarget - Scene color source.
     * @param {pc.RenderTarget} outputTarget - Final destination target.
     * @param {pc.Vec4} rect - Normalized viewport rect for the quad draw.
     */
    render: function (inputTarget, outputTarget, rect) {
        const device = this.device;
        const scope = device.scope;

        // Keep the AO buffer in lockstep with the input size (replaces the
        // old fixed device-sized buffer created at construction time).
        this.resizeRenderTarget(inputTarget);
        const width = this.target.width;
        const height = this.target.height;

        const sampleCount = this.samples;
        const spiralTurns = 10.0;
        // Angular step between successive spiral samples.
        const step = (1.0 / (sampleCount - 0.5)) * spiralTurns * 2.0 * Math.PI;

        const radius = this.radius;
        const bias = 0.001;
        const peak = 0.1 * radius;
        const intensity = (peak * 2.0 * Math.PI) * 0.125;
        // NOTE(review): projection scale is derived from the AO buffer height;
        // if the blur/output pass runs at a different size this is a likely
        // source of over-scaled shadows — confirm against the output size.
        const projectionScale = 0.1 * height;
        const cameraFarClip = this.ssaoScript.entity.camera.farClip;

        scope.resolve("uAspect").setValue(width / height);
        scope.resolve("uResolution").setValue([width, height, 1.0 / width, 1.0 / height]);
        scope.resolve("uColorBuffer").setValue(inputTarget.colorBuffer);
        scope.resolve("uBrightness").setValue(this.brightness);
        scope.resolve("uInvFarPlane").setValue(1.0 / cameraFarClip);
        scope.resolve("uSampleCount").setValue([sampleCount, 1.0 / sampleCount]);
        scope.resolve("uSpiralTurns").setValue(spiralTurns);
        scope.resolve("uAngleIncCosSin").setValue([Math.cos(step), Math.sin(step)]);
        scope.resolve("uMaxLevel").setValue(0.0);
        scope.resolve("uInvRadiusSquared").setValue(1.0 / (radius * radius));
        scope.resolve("uMinHorizonAngleSineSquared").setValue(0.0);
        scope.resolve("uBias").setValue(bias);
        scope.resolve("uPeak2").setValue(peak * peak);
        scope.resolve("uIntensity").setValue(intensity);
        scope.resolve("uPower").setValue(1.0);
        scope.resolve("uProjectionScaleRadius").setValue(projectionScale * radius);

        // Pass 1: raw AO into the intermediate target.
        pc.drawFullscreenQuad(device, this.target, this.vertexBuffer, this.ssaoShader, rect);

        // Pass 2: bilateral blur, sampling the AO buffer, into the output.
        scope.resolve("uSSAOBuffer").setValue(this.target.colorBuffer);
        scope.resolve("uFarPlaneOverEdgeDistance").setValue(1);
        scope.resolve("uBilatSampleCount").setValue(4);
        pc.drawFullscreenQuad(device, outputTarget, this.vertexBuffer, this.blurShader, rect);
    }
});
Instead of creating the colorBuffer when the shader is created, I now create a new one any time the input buffer changes size, and I use that size instead of the device size when setting the scope parameters. In theory this should work; from what I can tell, I'm just drawing the quad wrong somehow.