Decomposing a GLSL mat4 into its original RTS values to compute a view UV offset in the vertex shader.

I need to get the rotation difference between the model and the camera, convert the values to radians/degrees, and pass them to the fragment shader.
For that I need to decompose the model rotation matrix, and possibly the camera view matrix as well. I cannot seem to find a mechanism for decomposing them that is suitable for use inside a shader.
The rotation details go to the fragment shader to calculate the UV offset.
original_rotation + viewing_angles are used to calculate the sprite offset of the texture below, which is displayed as a billboard.
Eventually the UV should offset downwards when looking from top to bottom (e.g. from H3 to A3), upwards when looking from bottom to top (e.g. from A3 to H3), and left to right / right to left when looking from the sides (e.g. from D1 to D8 and vice versa).

texture to offset
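If decomposing inside the shader stays awkward, one workaround (not from the original post) is to decompose on the CPU with Three.js and feed the result in as a uniform. A minimal sketch, assuming a hypothetical vec3 uniform named model_rotation has been declared on the material:

    // CPU-side RTS decomposition, passed to the shader as a uniform.
    // `model_rotation` is an assumed uniform, not part of the original fiddle.
    const pos = new THREE.Vector3();
    const quat = new THREE.Quaternion();
    const scl = new THREE.Vector3();
    mesh.matrixWorld.decompose(pos, quat, scl);                     // T, R, S parts
    const euler = new THREE.Euler().setFromQuaternion(quat, 'XYZ'); // radians
    mesh.material.uniforms.model_rotation.value.set(euler.x, euler.y, euler.z);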

    const vertex_shader = `
    precision highp float;
    uniform mat4 modelViewMatrix;
    uniform mat4 projectionMatrix;
    attribute vec3 position;
    attribute vec2 uv;
    attribute mat4 instanceMatrix;
    attribute float index;
    attribute float texture_index;
    uniform vec2 rows_cols;
    uniform vec3 camera_location;

    varying float vTexIndex;
    varying vec2 vUv;
    varying vec4 transformed_normal;


        float normal_to_orbit(vec3 rotation_vector, vec3 view_vector){

            rotation_vector = normalize(rotation_vector);
            view_vector = normalize(view_vector);
            vec3 x_direction = vec3(1.0,0,0);
            vec3 y_direction = vec3(0,1.0,0);
            vec3 z_direction = vec3(0,0,1.0);

            float rotation_x_length = dot(rotation_vector, x_direction);
            float rotation_y_length = dot(rotation_vector, y_direction);
            float rotation_z_length = dot(rotation_vector, z_direction);

            float view_x_length = dot(view_vector, x_direction);
            float view_y_length = dot(view_vector, y_direction);
            float view_z_length = dot(view_vector, z_direction);

            //TOP
            float top_rotation = degrees(atan(rotation_x_length, rotation_z_length));
            float top_view = degrees(atan(view_x_length, view_z_length));
            float top_final = top_view-top_rotation;
            float top_idx = floor(top_final/(360.0/rows_cols.x));
            //FRONT (uses the y components; the otherwise-unused y lengths above belong here)
            float front_rotation = degrees(atan(rotation_y_length, rotation_z_length));
            float front_view = degrees(atan(view_y_length, view_z_length));
            float front_final = front_view-front_rotation;
            float front_idx = floor(front_final/(360.0/rows_cols.y));

            return abs((front_idx*rows_cols.x)+top_idx);
        }

    vec3 extractEulerAngleXYZ(mat4 mat) {
        vec3 rotangles = vec3(0,0,0);
        rotangles.x = atan(mat[2].z, -mat[1].z);
        float cosYangle = sqrt(pow(mat[0].x, 2.0) + pow(mat[0].y, 2.0));
        rotangles.y = atan(cosYangle, mat[0].z);
        float sinXangle = sin(rotangles.x);
        float cosXangle = cos(rotangles.x);
        rotangles.z = atan(cosXangle * mat[1].y + sinXangle * mat[2].y, cosXangle * mat[1].x + sinXangle * mat[2].x);
        return rotangles;
    }

    float view_index(vec3 position, mat4 mv_matrix, mat4 rot_matrix){
        vec4 posInView = mv_matrix * vec4(0.0, 0.0, 0.0, 1.0);
        // posInView /= posInView[3];
        vec3 VinView = normalize(-posInView.xyz); // (0, 0, 0) - posInView
        // vec4 NinView = normalize(rot_matrix * vec4(0.0, 0.0, 1.0, 1.0));
        // float NdotV = dot(NinView, VinView);
        vec4 view_normal = rot_matrix * vec4(VinView.xyz, 1.0);
        float view_x_length = dot(view_normal.xyz, vec3(1.0,0,0));
        float view_y_length = dot(view_normal.xyz, vec3(0,1.0,0));
        float view_z_length = dot(view_normal.xyz, vec3(0,0,1.0));
        // float radians = atan(-view_x_length, -view_z_length);
        float radians = atan(view_x_length, view_z_length);
        // float angle = radians/PI*180.0 + 180.0;
        float angle = degrees(radians);
        if (radians < 0.0) { angle += 360.0; }
        if (0.0 <= angle && angle <= 360.0){
            return floor(angle/(360.0/rows_cols.x));
        }
        return 0.0;
    }

    void main(){
        vec4 original_normal = vec4(0.0, 0.0, 1.0, 1.0);
        // transformed_normal = modelViewMatrix * instanceMatrix * original_normal;
        vec3 rotangles = extractEulerAngleXYZ(modelViewMatrix * instanceMatrix);
        // transformed_normal = vec4(rotangles.xyz, 1.0);
        transformed_normal = vec4(camera_location.xyz, 1.0);


        // instance center in view space plus the quad corner offset (the * vec4(1.0) was a no-op)
        vec4 v = (modelViewMatrix * instanceMatrix * vec4(0.0, 0.0, 0.0, 1.0)) + vec4(position.xy, 0.0, 0.0);
        vec4 model_center = (modelViewMatrix* instanceMatrix* vec4(0.0, 0.0, 0.0, 1.0));
        vec4 model_normal = (modelViewMatrix* instanceMatrix* vec4(0.0, 0.0, 1.0, 1.0));
        vec4 cam_loc = vec4(camera_location.xyz, 1.0);
        vec4 view_vector = normalize((cam_loc-model_center));
        //float findex = normal_to_orbit(model_normal.xyz, view_vector.xyz);
        mat4 base_matrix = modelViewMatrix * instanceMatrix; // was undefined here; assumed to be the combined model-view matrix
        mat4 combined_rot = instanceMatrix;                  // assumption: the rotation-only matrix the function expects
        float findex = view_index(position, base_matrix, combined_rot);

        vTexIndex = texture_index;
        vUv = vec2(mod(findex,rows_cols.x)/rows_cols.x, floor(findex/rows_cols.x)/rows_cols.y) + (uv / rows_cols);
        //vUv = vec2(mod(index,rows_cols.x)/rows_cols.x, floor(index/rows_cols.x)/rows_cols.y) + (uv / rows_cols);

        gl_Position = projectionMatrix * v;
        // gl_Position = projectionMatrix * modelViewMatrix * instanceMatrix * vec4(position, 1.0);
    }
    `
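  // Sanity-check sketch for extractEulerAngleXYZ above (a debugging aid, not part
  // of the original fiddle): build a known rotation on the CPU, recover it with
  // Three.js, then compare against the shader's rotangles.
  function debug_euler_extraction() {
    const m = new THREE.Matrix4().makeRotationFromEuler(new THREE.Euler(0.3, 0.5, 0.1, 'XYZ'));
    const e = new THREE.Euler().setFromRotationMatrix(m, 'XYZ');
    console.log(e.x, e.y, e.z); // should print roughly 0.3 0.5 0.1
  }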
  const fragment_shader = (texture_count) => {
    var fragShader = `
          precision highp float;
          uniform sampler2D textures[${texture_count}];
          varying float vTexIndex;
          varying vec2 vUv;
          varying vec4 transformed_normal;

          void main() {
              vec4 finalColor;
              `;
    for (var i = 0; i < texture_count; i++) {
      if (i == 0) {
        fragShader += `if (vTexIndex < ${i}.5) {
                  finalColor = texture2D(textures[${i}], vUv);
                  }
                `
      } else {
        fragShader += `else if (vTexIndex < ${i}.5) {
                  finalColor = texture2D(textures[${i}], vUv);
                  }
                `
      }
    }
    //fragShader += `gl_FragColor = finalColor * transformed_normal; }`;
    fragShader += `gl_FragColor = finalColor; }`;
    // fragShader += `gl_FragColor = startColor * finalColor; }`;   
    // int index = int(v_TexIndex+0.5); //https://stackoverflow.com/questions/60896915/texture-slot-not-getting-picked-properly-in-shader-issue
    //console.log('frag shader: ', fragShader)
    return fragShader;
  }
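  // For reference, fragment_shader(2) expands the loop into one branch per sampler:
  //   if (vTexIndex < 0.5)      { finalColor = texture2D(textures[0], vUv); }
  //   else if (vTexIndex < 1.5) { finalColor = texture2D(textures[1], vUv); }
  //   gl_FragColor = finalColor;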

  function reset_instance_positions() {
    const dummy = new THREE.Object3D();
    const offset = 500*4
    for (var i = 0; i < max_instances; i++) {
      dummy.position.set(offset-(Math.floor(i % 8)*500), offset-(Math.floor(i / 8)*500), 0);
      dummy.updateMatrix();
      mesh.setMatrixAt(i, dummy.matrix);
    }
    mesh.instanceMatrix.needsUpdate = true;
  }

  function setup_geometry() {
    const geometry = new THREE.InstancedBufferGeometry().copy(new THREE.PlaneBufferGeometry(400, 400));
    const index = new Float32Array(max_instances * 1); // index
    for (let i = 0; i < max_instances; i++) {
      index[i] = (i % max_instances) * 1.0 /* index[i] = 0.0  */
    }
    geometry.setAttribute("index", new THREE.InstancedBufferAttribute(index, 1));
    const texture_index = new Float32Array(max_instances * 1); // texture_index
    const max_maps = 1
    for (let i = 0; i < max_instances; i++) {
      texture_index[i] = (Math.floor(i / max_instances) % max_maps) * 1.0 /* index[i] = 0.0  */
    }
    geometry.setAttribute("texture_index", new THREE.InstancedBufferAttribute(texture_index, 1));
    const textures = [texture]
    const grid_xy = new THREE.Vector2(8, 8)
    mesh = new THREE.InstancedMesh(geometry,
      new THREE.RawShaderMaterial({
        uniforms: {
          textures: {
            type: 'tv',
            value: textures
          },
          rows_cols: {
            value: new THREE.Vector2(grid_xy.x * 1.0, grid_xy.y * 1.0)
          },
          camera_location: {
            value: camera.position
          }
        },
        vertexShader: vertex_shader,
        fragmentShader: fragment_shader(textures.length),
        side: THREE.DoubleSide,
        // transparent: true,
      }), max_instances);
    scene.add(mesh);
    reset_instance_positions()
  }

  var camera, scene, mesh, renderer;
  const max_instances = 64

  function init() {

    camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight,1, 10000 );
    camera.position.z = 1024;

    scene = new THREE.Scene();
    scene.background = new THREE.Color(0xffffff);

    setup_geometry()
    var canvas = document.createElement('canvas');
    var context = canvas.getContext('webgl2');

    renderer = new THREE.WebGLRenderer({
      canvas: canvas,
      context: context
    });
    renderer.setPixelRatio(window.devicePixelRatio);
    renderer.setSize(window.innerWidth, window.innerHeight);
    document.body.appendChild(renderer.domElement);

    window.addEventListener('resize', onWindowResize, false);

    var controls = new THREE.OrbitControls(camera, renderer.domElement);
  }

  function onWindowResize() {

    camera.aspect = window.innerWidth / window.innerHeight;
    camera.updateProjectionMatrix();

    renderer.setSize(window.innerWidth, window.innerHeight);

  }

  function animate() {
    requestAnimationFrame(animate);

    renderer.render(scene, camera);
  }
  var dataurl = "https://istack.dev59.com/accaU.webp"

  var texture;
  var imageElement = document.createElement('img');
  imageElement.onload = function(e) {
    texture = new THREE.Texture(this);
    texture.needsUpdate = true;
    init();
    animate();
  };
  imageElement.src = dataurl;

Working JSFiddle so far.


1 Answer

You have a 4x4 transform matrix M applied to an xy-plane QUAD, and you want to map its four corners (p0, p1, p2, p3) onto a texture in a "repeat" manner (crossing the border left/right/top/bottom wraps back to right/left/bottom/top), based on the direction of the matrix's Z axis. You face two problems:

1. The rotation in M has 3 degrees of freedom while you need only 2 (yaw, pitch), so if roll is present the result may be questionable.
2. If the texture crosses a border, that has to be handled in GLSL to avoid the artifacts you appear to have. So either do this in a geometry shader and subdivide the quad into more pieces as needed, or use an enlarged texture with the needed overlap.
Now, unless I have missed something, the conversion goes like this:
const float pi = 3.1415926535897932384626433832795;
vec3 d  = normalize(M[2].xyz);  // z axis of M (3rd column in column-major notation)
vec2 dd = normalize(d.xy);
float u = atan(dd.y, dd.x);     // GLSL's two-argument atan; there is no atan2 in GLSL
float v = acos(d.z);
u = (u + pi) / (2.0 * pi);      // map <-pi, +pi> to <0, 1>
v = v / pi;                     // map <0, pi> to <0, 1>
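
The mapping can be sanity-checked on the CPU; a small sketch (the helper name dirToUV is illustrative):

    // Map a direction vector to the <0,1> x <0,1> range described above.
    function dirToUV(d) {
      d = d.clone().normalize();
      const u = (Math.atan2(d.y, d.x) + Math.PI) / (2.0 * Math.PI);  // azimuth -> <0,1>
      const v = Math.acos(Math.max(-1, Math.min(1, d.z))) / Math.PI; // inclination -> <0,1>
      return new THREE.Vector2(u, v);
    }
    console.log(dirToUV(new THREE.Vector3(1, 0, 0))); // u = 0.5, v = 0.5 (side-on view)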

The z-axis extraction is just a simple copy of the 3rd column/row (depending on your notation) of the matrix M, or the result of transforming (0,0,1,0) by it.

If an overlapped texture is used, you also need to add this:

const float ov = 1.0/8.0;       // overlap size (one cell of the 8x8 grid)
u = (u + ov) / (1.0 + ov + ov); // squeeze <0,1> so an ov-wide border remains on each side
v = (v + ov) / (1.0 + ov + ov);
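
A quick CPU check of the remap shows the border it leaves (assuming the (u+ov)/(1+2*ov) form above):

    const ov = 1.0 / 8.0;
    const remap = (t) => (t + ov) / (1.0 + ov + ov);
    console.log(remap(0.0), remap(1.0)); // 0.1 and 0.9: the overlap border stays unused on both sides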

The texture then looks like this:

overlap texture

If your quad covers more than 1/8 of the original texture, you need to enlarge the overlap...

Now, to handle the corners of the QUAD instead of just its axis, you can move the quad corners along Z+ by some distance l in mesh local coordinates, apply M to them, and use those four points as directions for computing u,v in the vertex shader. l will affect the size of the texture area the quad uses... This approach might even handle roll, but I have not tested it...
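
A CPU sketch of that per-corner variant (the quad size, l, and the helper name are illustrative; directions are taken relative to the transformed quad center):

    // Lift each corner along local Z+ by l, transform by M, and turn the results
    // into direction vectors for the u,v mapping above.
    function cornerDirs(M, l) {
      const center = new THREE.Vector3(0, 0, 0).applyMatrix4(M);
      const corners = [
        new THREE.Vector3(-0.5, -0.5, l), new THREE.Vector3(0.5, -0.5, l),
        new THREE.Vector3( 0.5,  0.5, l), new THREE.Vector3(-0.5, 0.5, l),
      ];
      return corners.map(p => p.applyMatrix4(M).sub(center).normalize()); // feed into dirToUV()
    }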

After implementing it, my concerns proved justified: any two Euler angles influence each other, so the result is fine in most directions, but in edge cases things get mirrored or jump along one or both axes. This is probably caused by the difference in area coverage between 3 DOF and 2 DOF (unless I made a mistake in my code, or the driver computes the math incorrectly, which has happened to me before due to driver bugs).

If you want azimuth/elevation instead, that should be fine, since they are also 2 DOF and the equations above apply to them too, with just some +/- range conversions.
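
For completeness, a sketch of the azimuth/elevation flavor with those range conversions (elevation uses asin, so its raw range is <-pi/2, +pi/2>):

    function dirToAzEl(d) {
      d = d.clone().normalize();
      const az = Math.atan2(d.y, d.x);                      // <-pi, +pi>
      const el = Math.asin(Math.max(-1, Math.min(1, d.z))); // <-pi/2, +pi/2>
      return new THREE.Vector2((az + Math.PI) / (2.0 * Math.PI), (el + Math.PI / 2.0) / Math.PI);
    }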


Overlapping textures are acceptable. I put the code in offset_uv() at https://jsfiddle.net/sadernalwis/adzL047x/95/, but after multiplying by the view vector we end up with a vec4, not a 4x4 matrix. So normalizing the z axis of the M matrix cannot account for only the model rotation without the viewer position... right? - Sadern Alwis
"Radians converted to the <0.0, 1.0> range": is that a clamp? - Sadern Alwis
@SadernAlwis No... you have to use linear interpolation, so the atan result becomes (atan(...)+pi)/(2*pi) and the acos result acos(...)/pi... I implemented it, but as I feared it does not work the way you would expect, because any two Euler angles influence each other (even with no roll present), so at some angles it works as expected and at others it is mirrored or jumps... So unless I am missing something, the only option is to use GL_CUBE_MAP and distort the map to cover the cube/sphere surface instead of a single face... however, the distortion will be visible... - Spektre
Understood. Given the distorted mapping and the abuse involved, a cube map does not seem like the right approach. I will dig deeper into azimuth and elevation now. Anyway, thank you for all your effort and guidance. I will keep the post updated. - Sadern Alwis
Elevation as azimuth :D, for now I can live with that. I just need to figure out why the actual azimuth and elevation are not being computed. - Sadern Alwis
