我在实现放大镜效果的时候和老师讲的不太一样。
来源:4-5 实战:实现基于 webgl 的放大镜

慕运维5378130
2024-03-03
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Document</title>
<style>
.text {
height: 560px;
width: 560px;
border: 1px solid #000;
margin-top: 5vh;
margin-left: calc(50vw - 280px);
background-color: #0003;
}
body {
height: 100vh;
overflow: hidden;
margin: 0;
}
</style>
</head>
<body>
<script src="../lib.js"></script>
<script>
// 使用attribute变量
const canvas = document.createElement("canvas");
document.body.appendChild(canvas);
canvas.classList.add("text");
const gl = canvas.getContext("webgl");
// 第一步编写着色器源(着色器执行内容)
const VERTEX_SHADER_SOURCE = `
attribute vec4 aPosition;
attribute vec4 aTex;
varying vec2 vTex;
varying vec4 vPosition;
void main() {
vPosition = aPosition;
gl_Position = aPosition;
vTex = vec2(aTex.x, aTex.y);
}
`;
const FRAGMENT_SHADER_SOURCE = `
precision mediump float;
uniform sampler2D uSampler;
uniform vec2 lookAt;
varying vec2 vTex;
varying vec4 vPosition;
void main() {
vec2 uv = vTex;
float fOpacity = 0.0;
float dis = distance(lookAt, vec2(vPosition));
if(dis > 0.2) {
fOpacity = 0.05;
} else {
fOpacity = 1.0;
vec2 diff = vPosition.xy - lookAt;
uv.x -= diff.x * 0.2;
uv.y += diff.y * 0.2;
}
vec4 color = texture2D(uSampler, uv );
gl_FragColor = vec4(color.xyz * fOpacity, fOpacity);
}
`;
const program = initShader(
gl,
VERTEX_SHADER_SOURCE,
FRAGMENT_SHADER_SOURCE
);
// 获取attribute变量
const aPosition = gl.getAttribLocation(program, "aPosition");
const aTex = gl.getAttribLocation(program, "aTex");
const uSampler = gl.getUniformLocation(program, "uSampler")
const lookAt = gl.getUniformLocation(program, "lookAt")
const points = new Float32Array([
-0.9, 0.9, 0.0, 1.0,
-0.9, -0.9, 0.0, 0.0,
0.9, 0.9, 1.0, 1.0,
0.9, -0.9, 1.0, 0.0,
]);
const buffer = gl.createBuffer();
const BYTES = points.BYTES_PER_ELEMENT;
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, points, gl.STATIC_DRAW);
gl.vertexAttribPointer(aPosition, 2, gl.FLOAT, false, BYTES * 4, 0);
gl.enableVertexAttribArray(aPosition);
gl.vertexAttribPointer(aTex, 2, gl.FLOAT, false, BYTES * 4, BYTES * 2);
gl.enableVertexAttribArray(aTex);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
const img = new Image();
img.addEventListener("load", () => {
// 创建纹理对象
const texture = gl.createTexture();
// 翻转图片Y轴
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, 1);
// 开启一个纹理单元
gl.activeTexture(gl.TEXTURE0);
// 绑定纹理对象
gl.bindTexture(gl.TEXTURE_2D, texture)
// 处理放大缩小的逻辑
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR)
// 处理横向和纵向平铺的方式
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
// 配置纹理图像
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img)
gl.uniform1i(uSampler, 0)
})
img.src = "../assets/content.png"
const draw = () => {
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
requestAnimationFrame(draw)
}
draw()
document.addEventListener('mousemove',e => {
const x = e.clientX;
const y = e.clientY;
const { left, top, width, height } = e.target.getBoundingClientRect();
const resX = x - left;
const resY = y - top;
const clickX = (resX - width / 2) / (width / 2);
const clickY = (height / 2 - resY) / (height / 2)
gl.uniform2fv(lookAt, [clickX, clickY])
})
</script>
</body>
</html>
在uv.y += diff.y * 0.2;
时图片在y轴方向上是缩小的;
在uv.y -= diff.y * 0.2;
时图片在y轴方向上是放大的;
写回答
1回答
-
yancy
2024-12-30
这个可以看下世界坐标的建立是否是一样的,跟视角和视距也有一定的关系
00
相似问题