Analysis of a shader in VR
I want to create a shader that takes world coordinates and creates waves. I want to analyze the video and understand the steps required.
I'm not looking for code, just ideas on how to implement it using GLSL or HLSL or any other language.
Here is a low quality and low fps GIF in case the link breaks.
This is the fragment shader:
#version 330 core
// Interpolated values from the vertex shaders
in vec2 UV;
in vec3 Position_worldspace;
in vec3 Normal_cameraspace;
in vec3 EyeDirection_cameraspace;
in vec3 LightDirection_cameraspace;
// highlight effect
in float pixel_z; // fragment z coordinate in [LCS]
uniform float animz; // highlight animation z coordinate [GCS]
// Output data
out vec4 color;
vec3 c;
// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;
uniform mat4 MV;
uniform vec3 LightPosition_worldspace;
void main(){
// Light emission properties
// You probably want to put them as uniforms
vec3 LightColor = vec3(1,1,1);
float LightPower = 50.0f;
// Material properties
vec3 MaterialDiffuseColor = texture( myTextureSampler, UV ).rgb;
vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * MaterialDiffuseColor;
vec3 MaterialSpecularColor = vec3(0.3,0.3,0.3);
// Distance to the light
float distance = length( LightPosition_worldspace - Position_worldspace );
// Normal of the computed fragment, in camera space
vec3 n = normalize( Normal_cameraspace );
// Direction of the light (from the fragment to the light)
vec3 l = normalize( LightDirection_cameraspace );
// Cosine of the angle between the normal and the light direction,
// clamped above 0
// - light is at the vertical of the triangle -> 1
// - light is perpendicular to the triangle -> 0
// - light is behind the triangle -> 0
float cosTheta = clamp( dot( n,l ), 0,1 );
// Eye vector (towards the camera)
vec3 E = normalize(EyeDirection_cameraspace);
// Direction in which the triangle reflects the light
vec3 R = reflect(-l,n);
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
// - Looking into the reflection -> 1
// - Looking elsewhere -> < 1
float cosAlpha = clamp( dot( E,R ), 0,1 );
c =
// Ambient : simulates indirect lighting
MaterialAmbientColor +
// Diffuse : "color" of the object
MaterialDiffuseColor * LightColor * LightPower * cosTheta / (distance*distance) +
// Specular : reflective highlight, like a mirror
MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha,5) / (distance*distance);
// standard shading result
color=vec4(c,1.0);
// highlight effect
float z;
z=abs(pixel_z-animz); // distance to animated z coordinate
z*=1.5; // scale to change highlight width
if (z<1.0)
{
z*=0.5*3.1415926535897932384626433832795; // z=<0,M_PI/2> 0 in the middle
z=0.5*cos(z);
color.rgb+=vec3(0.0,z,z); // add the highlight on top of the shaded color
}
}
Here is the vertex shader:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec2 vertexUV;
layout(location = 2) in vec3 vertexNormal_modelspace;
// Output data ; will be interpolated for each fragment.
out vec2 UV;
out vec3 Position_worldspace;
out vec3 Normal_cameraspace;
out vec3 EyeDirection_cameraspace;
out vec3 LightDirection_cameraspace;
out float pixel_z; // fragment z coordinate in [LCS]
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform vec3 LightPosition_worldspace;
void main(){
pixel_z=vertexPosition_modelspace.z;
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP * vec4(vertexPosition_modelspace,1);
// Position of the vertex, in worldspace : M * position
Position_worldspace = (M * vec4(vertexPosition_modelspace,1)).xyz;
// Vector that goes from the vertex to the camera, in camera space.
// In camera space, the camera is at the origin (0,0,0).
vec3 vertexPosition_cameraspace = ( V * M * vec4(vertexPosition_modelspace,1)).xyz;
EyeDirection_cameraspace = vec3(0,0,0) - vertexPosition_cameraspace;
// Vector that goes from the vertex to the light, in camera space. M is omitted because it's the identity.
vec3 LightPosition_cameraspace = ( V * vec4(LightPosition_worldspace,1)).xyz;
LightDirection_cameraspace = LightPosition_cameraspace + EyeDirection_cameraspace;
// Normal of the vertex, in camera space
Normal_cameraspace = ( V * M * vec4(vertexNormal_modelspace,0)).xyz; // Only correct if ModelMatrix does not scale the model ! Use its inverse transpose if not.
// UV of the vertex. No special space for this one.
UV = vertexUV;
}
I can think of two approaches:
Based on 3D reconstruction
You would need to reconstruct the 3D scene from motion (which is not an easy task, nor my preferred way). Then you simply apply a modulation to the texture of the selected mesh, based on its u,v texture-mapping coordinates and the animation time.
Describing such a topic is beyond the scope of an SO answer, so you should google some CV books/papers on the subject.
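For the modulation step itself, a minimal GLSL sketch could look like this (my sketch, not code from the video; it assumes the reconstructed mesh is rendered with its u,v coordinates and that a time value is fed from the CPU side - the names uv, tex and time are placeholders):
// Fragment - sketch of a time-animated wave modulation over the u,v mapping
#version 330 core
in vec2 uv; // interpolated texture coordinates of the reconstructed mesh
uniform sampler2D tex; // texture of the selected mesh
uniform float time; // animation time [s]
out vec4 color;
void main()
{
vec3 c=texture(tex,uv).rgb; // base color
float w=0.5+0.5*sin(10.0*uv.y-3.0*time); // wave traveling along the v axis
c+=vec3(0.0,0.25*w,0.25*w); // add a cyan-ish wave band on top of the base color
color=vec4(c,1.0);
}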
Based on image processing
You simply segment the image based on color continuity/homogeneity, i.e. group neighboring pixels with similar color and intensity (region growing). When done, try to fake a surface 3D reconstruction from the intensity gradients, similar to this:
Then create a u,v mapping where one axis is the depth.
Once that is done, just apply the sine-wave modulation to the color.
I would divide this into two phases: the first pass does the segmentation (I would do that on the CPU side), and the second pass renders the effect (on the GPU); a minimal sketch of that second pass follows below.
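Here is how that GPU effect pass might look in GLSL, assuming the CPU segmentation pass stored its faked depth in a single-channel texture and the camera image is available as a texture too (the names scene_tex, depth_tex and time are placeholders of mine, not part of any original code):
// Fragment - sketch of the effect pass, rendered over a full-screen quad
#version 330 core
in vec2 uv; // screen-space coordinate <0,1>
uniform sampler2D scene_tex; // original camera image
uniform sampler2D depth_tex; // faked depth produced by the CPU segmentation pass
uniform float time; // animation time [s]
out vec4 color;
void main()
{
vec3 c=texture(scene_tex,uv).rgb;
float d=texture(depth_tex,uv).r; // the faked depth acts as one axis of the u,v mapping
float w=0.5+0.5*sin(20.0*d-4.0*time); // sine wave traveling along the depth axis
c+=vec3(0.0,0.2*w,0.2*w);
color=vec4(c,1.0);
}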
As this is a form of augmented reality, you should also read this:
By the way, what is done in that video is neither of the options above. They most likely already have a mesh of that car in vector form and use silhouette matching to obtain its orientation in the image... and then render as usual... so it would not work for arbitrary objects in the scene, only for that car... like this:
- How to get the transformation matrix of a 3d model to object in a 2d image
[Edit1] GLSL highlight effect
I took this example:
and added the highlight to it like this:
On the CPU side I added an animz variable which determines the z coordinate (in the object local coordinate system LCS) at which the highlight is currently placed. I animate it in a timer between the min and max z values of the rendered mesh (a cube), +/- some margin, so the highlight does not instantly teleport from one side of the object to the other...
// global
float animz=-1.0;
// in timer
animz+=0.05; if (animz>1.5) animz=-1.5; // my object z = <-1,+1> 0.5 is margin
// render
id=glGetUniformLocation(prog_id,"animz"); glUniform1f(id,animz);
Vertex shader
I just take the vertex z coordinate and pass it to the fragment shader without any transformation:
out float pixel_z; // fragment z coordinate in [LCS]
pixel_z=pos.z;
Fragment shader
After computing the target color c (by standard rendering), I compute the distance between pixel_z and animz, and if it is small I modulate c with a sinewave depending on that distance:
// highlight effect
float z;
z=abs(pixel_z-animz); // distance to animated z coordinate
z*=1.5; // scale to change highlight width
if (z<1.0)
{
z*=0.5*3.1415926535897932384626433832795; // z=<0,M_PI/2> 0 in the middle
z=0.5*cos(z);
c+=vec3(0.0,z,z);
}
Here are the full GLSL shaders...
Vertex:
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location = 0) in vec3 pos;
layout(location = 2) in vec3 nor;
layout(location = 3) in vec3 col;
layout(location = 0) uniform mat4 m_model; // model matrix
layout(location =16) uniform mat4 m_normal; // model matrix with origin=(0,0,0)
layout(location =32) uniform mat4 m_view; // inverse of camera matrix
layout(location =48) uniform mat4 m_proj; // projection matrix
out vec3 pixel_pos; // fragment position [GCS]
out vec3 pixel_col; // fragment surface color
out vec3 pixel_nor; // fragment surface normal [GCS]
// highlight effect
out float pixel_z; // fragment z coordinate in [LCS]
void main()
{
pixel_z=pos.z;
pixel_col=col;
pixel_pos=(m_model*vec4(pos,1)).xyz;
pixel_nor=(m_normal*vec4(nor,1)).xyz;
gl_Position=m_proj*m_view*m_model*vec4(pos,1);
}
Fragment:
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location =64) uniform vec3 lt_pnt_pos;// point light source position [GCS]
layout(location =67) uniform vec3 lt_pnt_col;// point light source color&strength
layout(location =70) uniform vec3 lt_amb_col;// ambient light source color&strength
in vec3 pixel_pos; // fragment position [GCS]
in vec3 pixel_col; // fragment surface color
in vec3 pixel_nor; // fragment surface normal [GCS]
out vec4 col;
// highlight effect
in float pixel_z; // fragment z coordinate in [LCS]
uniform float animz; // highlight animation z coordinate [GCS]
void main()
{
// standard rendering
float li;
vec3 c,lt_dir;
lt_dir=normalize(lt_pnt_pos-pixel_pos); // vector from fragment to point light source in [GCS]
li=dot(pixel_nor,lt_dir);
if (li<0.0) li=0.0;
c=pixel_col*(lt_amb_col+(lt_pnt_col*li));
// highlight effect
float z;
z=abs(pixel_z-animz); // distance to animated z coordinate
z*=1.5; // scale to change highlight width
if (z<1.0)
{
z*=0.5*3.1415926535897932384626433832795; // z=<0,M_PI/2> 0 in the middle
z=0.5*cos(z);
c+=vec3(0.0,z,z);
}
col=vec4(c,1.0);
}
And a preview:
This approach needs no texture and no u,v mapping.
[Edit2] Highlight starting point
There are many ways to implement this. I chose the distance from the starting point as the highlight parameter, so the highlight grows from that point in all directions. Here is a preview for two different touch-point positions:
The thick white cross marks the touch point position, rendered just for visual checking. Here is the code:
Vertex:
// Vertex
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location = 0) in vec3 pos;
layout(location = 2) in vec3 nor;
layout(location = 3) in vec3 col;
layout(location = 0) uniform mat4 m_model; // model matrix
layout(location =16) uniform mat4 m_normal; // model matrix with origin=(0,0,0)
layout(location =32) uniform mat4 m_view; // inverse of camera matrix
layout(location =48) uniform mat4 m_proj; // projection matrix
out vec3 LCS_pos; // fragment position [LCS]
out vec3 pixel_pos; // fragment position [GCS]
out vec3 pixel_col; // fragment surface color
out vec3 pixel_nor; // fragment surface normal [GCS]
void main()
{
LCS_pos=pos;
pixel_col=col;
pixel_pos=(m_model*vec4(pos,1)).xyz;
pixel_nor=(m_normal*vec4(nor,1)).xyz;
gl_Position=m_proj*m_view*m_model*vec4(pos,1);
}
Fragment:
// Fragment
#version 400 core
#extension GL_ARB_explicit_uniform_location : enable
layout(location =64) uniform vec3 lt_pnt_pos;// point light source position [GCS]
layout(location =67) uniform vec3 lt_pnt_col;// point light source color&strength
layout(location =70) uniform vec3 lt_amb_col;// ambient light source color&strength
in vec3 LCS_pos; // fragment position [LCS]
in vec3 pixel_pos; // fragment position [GCS]
in vec3 pixel_col; // fragment surface color
in vec3 pixel_nor; // fragment surface normal [GCS]
out vec4 col;
// highlight effect
uniform vec3 touch; // highlight start point [GCS]
uniform float animt; // animation parameter <0,1> or -1 for off
uniform float size; // highlight size
void main()
{
// standard rendering
float li;
vec3 c,lt_dir;
lt_dir=normalize(lt_pnt_pos-pixel_pos); // vector from fragment to point light source in [GCS]
li=dot(pixel_nor,lt_dir);
if (li<0.0) li=0.0;
c=pixel_col*(lt_amb_col+(lt_pnt_col*li));
// highlight effect
float t=length(LCS_pos-touch)/size; // distance from start point
if (t<=animt)
{
t*=0.5*3.1415926535897932384626433832795; // t=<0,M_PI/2> 0 in the middle
t=0.75*cos(t);
c+=vec3(0.0,t,t);
}
col=vec4(c,1.0);
}
You control the highlight with these uniforms:
uniform vec3 touch; // highlight start point [GCS]
uniform float animt; // animation parameter <0,1> or -1 for off
uniform float size; // max distance of any point of object from touch point
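To get an actual wave train instead of a single growing front (closer to what the question asks for), one possible variation is to feed the same distance t through a repeating cosine and let animt move the wavefronts outward. This is just my sketch of the idea, not part of the original effect:
// highlight effect - repeating wave variant (sketch)
float t=length(LCS_pos-touch)/size; // distance from start point, roughly <0,1>
if ((animt>=0.0)&&(t<=animt))
{
float w=0.5+0.5*cos(6.2831853*((4.0*t)-animt)); // 4 wavefronts traveling away from the touch point
w*=1.0-t; // fade the wave out with distance
c+=vec3(0.0,0.5*w,0.5*w);
}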