Drawing a perspective view volume (camera frustum) in WebGL

I'm trying to compute the 8 (4 + 4) vertices of a view frustum: the near plane and the far plane.
I need these vertices to draw a camera's frustum in WebGL.
So far I've been computing them with trigonometry from each angle, but when I draw the vertices the result doesn't look right.
These are the equations I've come up with so far:
y = sqrt(hypotenuse^2 - plane^2)
x = sqrt(hypotenuse^2 - plane^2)
z = plane (near or far)
Can anyone help? Thanks in advance.
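For reference, for a symmetric frustum the half-extents at a given depth usually come from the field of view (half-height = depth * tan(fovY / 2), half-width = half-height * aspect) rather than from a hypotenuse. A minimal sketch in view space, assuming a camera at the origin looking down -Z with +Y up (the helper name and parameters are just for illustration):

// Corners of a symmetric frustum in view space (camera at the origin,
// looking down -Z, +Y up). Returns 4 near-plane corners then 4 far-plane corners.
function frustumCorners(fovY, aspect, zNear, zFar) {
  const corners = [];
  for (const z of [zNear, zFar]) {
    const halfH = z * Math.tan(fovY / 2);   // half the frustum height at depth z
    const halfW = halfH * aspect;           // half the frustum width at depth z
    corners.push(
      [-halfW,  halfH, -z],
      [ halfW,  halfH, -z],
      [ halfW, -halfH, -z],
      [-halfW, -halfH, -z],
    );
  }
  return corners;
}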
1 Answer

You can just project a standard cube (the -1 to +1 box) through an inverse projection matrix.

const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");

const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
  gl_Position = u_worldViewProjection * position;
}
`;

const fs = `
precision mediump float;
void main() {
  gl_FragColor = vec4(1, 0, 0, 1);
}
`;


const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

const arrays = {
   position: [
      -1,  1, -1,
       1,  1, -1,
       1, -1, -1,
      -1, -1, -1,

      -1,  1,  1,
       1,  1,  1,
       1, -1,  1,
      -1, -1,  1,
  ],
  indices: [
      0, 1, 1, 2, 2, 3, 3, 0,
      4, 5, 5, 6, 6, 7, 7, 4,
      0, 4, 1, 5, 2, 6, 3, 7,
  ],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);

function render(time) {
  time *= 0.001;
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  let projectionToViewWith;
  {  
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 0.5;
    const zFar = 100;
    projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar); 
  }
  let projectionToBeViewed;
  {
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 2;
    const zFar = 10;
    projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar); 
  }
  const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
  
  const radius = 20;
  const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
  const target = [0, 0, 0];
  const up = [0, 1, 0];
  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);

  const viewProjection = m4.multiply(projectionToViewWith, view);
  
  const worldViewProjection = m4.multiply(
      viewProjection,
      inverseProjectionToBeViewed);

  gl.useProgram(programInfo.program);
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

  twgl.setUniforms(programInfo, {
    u_worldViewProjection: worldViewProjection,
  });
  twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>

To get the coordinates of the 8 corners, all you need to do is project backward. A projection matrix converts the space inside the frustum, which is fovy tall, fovy * aspect wide, starts at -zNear and ends at -zFar, into the -1 to +1 box you get after the perspective divide.
To go backward and compute the corners of that box, we just run the -1 to +1 box through the inverse projection matrix and do the perspective divide again (which is exactly what happens in the example above; we're just doing it all on the GPU).
So, pulling it off the GPU and doing it in JavaScript:
[
  [-1,  1, -1],
  [ 1,  1, -1],
  [ 1, -1, -1],
  [-1, -1, -1],

  [-1,  1,  1],
  [ 1,  1,  1],
  [ 1, -1,  1],
  [-1, -1,  1],
].forEach((point) => {
  console.log(m4.transformPoint(inverseProjectionMatrix, point));
});
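Note that m4.transformPoint performs the perspective divide (divide by w) for you. If you multiply the point by the matrix yourself you have to do that divide explicitly, which is also what comes up in the comments at the end. A minimal sketch, assuming the flat, column-major 16-element matrices that twgl's m4 produces (the unprojectPoint helper name is just for illustration):

// Transform a 3D point by a 4x4 column-major matrix (the layout twgl's m4 uses)
// and apply the perspective divide. m4.transformPoint already does this;
// the divide by w is written out here only to make it explicit.
function unprojectPoint(inverseProjectionMatrix, point) {
  const m = inverseProjectionMatrix;
  const [x, y, z] = point;
  const w = m[3] * x + m[7] * y + m[11] * z + m[15];
  return [
    (m[0] * x + m[4] * y + m[8]  * z + m[12]) / w,
    (m[1] * x + m[5] * y + m[9]  * z + m[13]) / w,
    (m[2] * x + m[6] * y + m[10] * z + m[14]) / w,
  ];
}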

Here's a working example:

const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");

const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
void main() {
  gl_Position = u_worldViewProjection * position;
  gl_PointSize = 10.;
}
`;

const fs = `
precision mediump float;
uniform vec4 u_color;
void main() {
  gl_FragColor = u_color;
}
`;


const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

const positions = [
    -1,  1, -1,
     1,  1, -1,
     1, -1, -1,
    -1, -1, -1,

    -1,  1,  1,
     1,  1,  1,
     1, -1,  1,
    -1, -1,  1,
];
const arrays = {
   position: positions,
   indices: [
      0, 1, 1, 2, 2, 3, 3, 0,
      4, 5, 5, 6, 6, 7, 7, 4,
      0, 4, 1, 5, 2, 6, 3, 7,
  ],
};
const bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);

function render(time) {
  time *= 0.001;
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

  let projectionToViewWith;
  {  
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 0.5;
    const zFar = 100;
    projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar); 
  }
  let projectionToBeViewed;
  {
    const fov = 30 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    const zNear = 2;
    const zFar = 10;
    projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar); 
  }
  const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
  
  const radius = 20;
  const eye = [Math.sin(time) * radius, 4, Math.cos(time) * radius];
  const target = [0, 0, 0];
  const up = [0, 1, 0];
  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);

  const viewProjection = m4.multiply(projectionToViewWith, view);
  
  const worldViewProjection = m4.multiply(
      viewProjection,
      inverseProjectionToBeViewed);

  gl.useProgram(programInfo.program);
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);

  twgl.setUniforms(programInfo, {
    u_worldViewProjection: worldViewProjection,
    u_color: [1, 0, 0, 1],
  });
  twgl.drawBufferInfo(gl, bufferInfo, gl.LINES);
  
  // just because I'm lazy let's draw each point one at a time
  // note: since in our case the frustum is not moving we
  // could have computed these at init time. 
  const positionLoc = programInfo.attribSetters.position.location;
  gl.disableVertexAttribArray(positionLoc);

  for (let i = 0; i < positions.length; i += 3) {
    const point = positions.slice(i, i + 3);
    const worldPosition = m4.transformPoint(
        inverseProjectionToBeViewed, point);
    gl.vertexAttrib3f(positionLoc, ...worldPosition);
    twgl.setUniforms(programInfo, {
      u_color: [0, 1, 0, 1],
      u_worldViewProjection: viewProjection,
    });
    gl.drawArrays(gl.POINTS, 0, 1);
  }

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>

You mentioned in the comments that you'd like to be able to show one camera's frustum in another canvas.

That's effectively what's happening above, except that the camera in the canvas whose frustum we want to render isn't moving. Instead it just sits at the origin looking down the -Z axis with +Y up. To make the frustum move, so you can see where it is relative to that camera, just multiply in that camera's matrix.
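Concretely, the only change from the earlier example is that the clip-space cube's world matrix becomes the in-scene camera's matrix multiplied by that camera's inverse projection matrix. These are the key lines from the full example below:

// place the frustum in the world: clip-space box -> camera space (inverse projection),
// then camera space -> world space (the in-scene camera's matrix)
const world = m4.multiply(cameraInScene, inverseProjectionToBeViewed);
const worldViewProjection = m4.multiply(viewProjection, world);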

const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl");
const ext = gl.getExtension("OES_standard_derivatives");

const vs = `
attribute vec4 position;
uniform mat4 u_worldViewProjection;
varying vec3 v_position;
void main() {
  gl_Position = u_worldViewProjection * position;
  v_position = position.xyz;  // for fake lighting
}
`;

const fs = `
#extension GL_OES_standard_derivatives : enable
precision mediump float;
varying vec3 v_position;
uniform vec4 u_color;
void main() {
  vec3 fdx = dFdx(v_position);
  vec3 fdy = dFdy(v_position);

  vec3 n = normalize(cross(fdx,fdy)); 
  float l = dot(n, normalize(vec3(1,2,-3))) * .5 + .5;  
  gl_FragColor = u_color;
  gl_FragColor.rgb *= l;
}
`;


const programInfo = twgl.createProgramInfo(gl, [vs, fs]);

const arrays = {
   position: [
      -1,  1, -1,
       1,  1, -1,
       1, -1, -1,
      -1, -1, -1,

      -1,  1,  1,
       1,  1,  1,
       1, -1,  1,
      -1, -1,  1,
  ],
  indices: [
      0, 1, 1, 2, 2, 3, 3, 0,
      4, 5, 5, 6, 6, 7, 7, 4,
      0, 4, 1, 5, 2, 6, 3, 7,
  ],
};
const concat = twgl.primitives.concatVertices;
const reorient = twgl.primitives.reorientVertices;
const wireCubeBufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
const solidCubeBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 2);
const cameraBufferInfo = twgl.createBufferInfoFromArrays(gl,
  concat([
     reorient(twgl.primitives.createCubeVertices(2), 
              m4.translation([0, 0, 1])),
     reorient(twgl.primitives.createTruncatedConeVertices(0, 1, 2, 12, 1),
              m4.rotationX(Math.PI * -.5)),
  ])
);

const black = [0, 0, 0, 1];
const blue = [0, 0, 1, 1];

function drawScene(viewProjection, clearColor) {
  gl.clearColor(...clearColor);
  gl.clear(gl.COLOR_BUFFER_BIT);

  const numCubes = 10;
  for (let i = 0; i < numCubes; ++i) {
    const u = i / numCubes;
    let mat = m4.rotationY(u * Math.PI * 2);
    mat = m4.translate(mat, [0, 0, 10]);
    mat = m4.scale(mat, [1, 1 + u * 23 % 1, 1]);
    mat = m4.translate(mat, [0, .5, 0]);
    mat = m4.multiply(viewProjection, mat);
    drawModel(solidCubeBufferInfo, mat, [u, u * 3 % 1, u * 7 % 1,1]);
  }
}

function drawModel(bufferInfo, worldViewProjection, color, mode) {
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  twgl.setUniforms(programInfo, {
    u_worldViewProjection: worldViewProjection,
    u_color: color,
  });
  twgl.drawBufferInfo(gl, bufferInfo, mode);  
}

function render(time) {
  time *= 0.001;
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  const width = gl.canvas.width;
  const height = gl.canvas.height;
  const halfWidth = width / 2;
  gl.viewport(0, 0, width, height);

  gl.disable(gl.SCISSOR_TEST);
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
  gl.enable(gl.DEPTH_TEST);

  let projectionToViewWith;  // the projection on the right
  {  
    const fov = 60 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
    const zNear = 0.5;
    const zFar = 100;
    projectionToViewWith = m4.perspective(fov, aspect, zNear, zFar); 
  }
  let projectionToBeViewed;  // the projection on the left
  {
    const fov = 60 * Math.PI / 180;
    const aspect = gl.canvas.clientWidth / 2 / gl.canvas.clientHeight;
    const zNear = 1.5;
    const zFar = 15;
    projectionToBeViewed = m4.perspective(fov, aspect, zNear, zFar); 
  }
  const inverseProjectionToBeViewed = m4.inverse(projectionToBeViewed);
  
  let cameraViewingScene;  // camera for right view
  {
    const t1 = 0;
    const radius = 30;
    const eye = [Math.sin(t1) * radius, 4, Math.cos(t1) * radius];
    const target = [0, 0, 0];
    const up = [0, 1, 0];
    cameraViewingScene = m4.lookAt(eye, target, up);
  }
  
  let cameraInScene;  // camera for left view
  {
    const t1 = time;
    const t2 = time + .4;
    const r1 = 10 + Math.sin(t1);
    const r2 = 10 + Math.sin(t2) * 2;
    const eye = [Math.sin(t1) * r1, 0 + Math.sin(t1) * 4, Math.cos(t1) * r1];
    const target = [Math.sin(t2) * r2, 1 + Math.sin(t2), Math.cos(t2) * r2];
    const up = [0, 1, 0];
    cameraInScene = m4.lookAt(eye, target, up);
  }

  // there's only one shader program so just set it once
  gl.useProgram(programInfo.program);
  
  // draw only on left half of canvas
  gl.enable(gl.SCISSOR_TEST);
  gl.scissor(0, 0, halfWidth, height);
  gl.viewport(0, 0, halfWidth, height);
  
  // draw the scene on the left using the camera inside the scene
  {
     const view = m4.inverse(cameraInScene);
     const viewProjection = m4.multiply(projectionToBeViewed, view);
     drawScene(viewProjection, [.9, 1, .9, 1]);
  }
  
  // draw only on right half of canvas
  gl.scissor(halfWidth, 0, halfWidth, height);
  gl.viewport(halfWidth, 0, halfWidth, height);
  
  // draw the same scene on the right using the camera outside the scene
  {
    const view = m4.inverse(cameraViewingScene);
    const viewProjection = m4.multiply(projectionToViewWith, view);
    drawScene(viewProjection, [.9, 1, 1, 1]);
  
    // draw the in scene camera's frustum
    {
      const world = m4.multiply(cameraInScene, inverseProjectionToBeViewed);
      const worldViewProjection = m4.multiply(viewProjection, world);
      drawModel(wireCubeBufferInfo, worldViewProjection, black, gl.LINES);
    }
    
    // draw the in scene camera's camera model
    {
       const worldViewProjection = m4.multiply(viewProjection, cameraInScene);
       drawModel(cameraBufferInfo, worldViewProjection, blue);
    }
  }  

  requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>


Wow, thank you so much. This is actually conceptually easier to understand. I tried to implement it but ran into some problems. I have two canvases and I want to draw canvasB's camera frustum inside canvasA. I tried the following (please correct me if I'm wrong): I created a standard cube model as you said. Then, for each point V of that model, I multiplied it by canvasB's inverseProjectionMatrix. That gives me a new point V' corresponding to the bounds of canvasB's view volume. Now that I have all the right points, I can send them to the vertex buffer. This gives me a parallelepiped. - alejandromumo
Did you divide by W? temp = V * inverseProjectionMatrix; newV = temp.xyz / temp.w - gman
That worked! I had the multiplication order wrong before. I can't thank you enough. By the way, I've been following your WebGL tutorials and they're incredibly valuable. Thank you so much! - alejandromumo
