-
Notifications
You must be signed in to change notification settings - Fork 0
/
transform.h
151 lines (111 loc) · 5.21 KB
/
transform.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
#pragma once
#include "geometry.h" // mat4, vec3f, cross()
#include <cmath>      // tanf() used by the fov-based projection() below

// Pi as a compile-time constant (double literal, narrowed to float as before).
constexpr float pi = 3.1415926535897932385;

// Degrees -> radians conversion factor. A typed constant instead of the old
// unparenthesized macro (`#define DEG2RAD pi/180.0`), which mis-associated in
// expressions such as `x / DEG2RAD` or `1.0/DEG2RAD`. Kept as double
// (pi / 180.0) so existing expressions promote exactly as they did before.
constexpr double DEG2RAD = pi / 180.0;
// Viewport transform: maps normalized device coordinates in [-1,1]^3 to a
// screen-space box with lower-left corner (x_v, y_v, 0) extending w, h and
// depth along the respective axes:
//   x: [-1, 1] -> [x_v, x_v + w]
//   y: [-1, 1] -> [y_v, y_v + h]
//   z: [-1, 1] -> [0, depth]
// Derivation: scale each axis by half its target extent, then translate to
// the center of the target range, giving the matrix
//   [ w/2   0      0        x_v + w/2 ]
//   [ 0     h/2    0        y_v + h/2 ]
//   [ 0     0      depth/2  depth/2   ]
//   [ 0     0      0        1         ]
// `inline` so this header can be included from multiple translation units
// without multiple-definition link errors.
inline mat4 viewport(int x_v, int y_v, int w, int h, int depth){
    mat4 m = mat4::identity();
    // scaling: half of each target extent
    m[0][0] = w/2.0f;
    m[1][1] = h/2.0f;
    m[2][2] = depth/2.0f;
    // translation: center of each target range. Float division here — the
    // original `x_v + w/2` used integer division and silently dropped the
    // half-pixel offset for odd extents, while the scale row above already
    // used `w/2.0f` (inconsistent and off by 0.5).
    m[0][3] = x_v + w/2.0f;
    m[1][3] = y_v + h/2.0f;
    m[2][3] = depth/2.0f;
    return m;
}
// View ("lookAt") matrix, imitating glm::lookAt.
// Produces the change of basis from world space to a camera placed at
// `camPos` looking toward `targetPos`, with `globalUp` fixing the roll.
// Camera basis (right-handed; the camera looks down its local -z axis):
//   camDir   = normalize(camPos - targetPos)   // camera +z
//   camRight = normalize(globalUp x camDir)    // camera +x
//   camUp    = normalize(camDir x camRight)    // camera +y
// The rotation R is the inverse of the matrix whose COLUMNS are the basis
// vectors; since that matrix is orthonormal its inverse is its transpose, so
// R simply has the basis vectors as ROWS. T translates by -camPos (the scene
// moves opposite to the camera so the camera ends up at the origin).
// Returns R*T. `inline` so this header can be included from multiple
// translation units without multiple-definition link errors.
inline mat4 lookat(vec3f camPos, vec3f targetPos, vec3f globalUp = vec3f(0.0,1.0,0.0)){
    vec3f camDir = (camPos - targetPos).normalize();
    vec3f camRight = cross(globalUp, camDir).normalize();
    vec3f camUp = cross(camDir, camRight).normalize();

    // Rotation: rows are the camera basis vectors (transpose of the
    // column-basis matrix). identity() already supplies the 0s and the
    // trailing 1, so only the 3x3 part needs filling in.
    mat4 R = mat4::identity();
    R[0][0] = camRight.x; R[0][1] = camRight.y; R[0][2] = camRight.z;
    R[1][0] = camUp.x;    R[1][1] = camUp.y;    R[1][2] = camUp.z;
    R[2][0] = camDir.x;   R[2][1] = camDir.y;   R[2][2] = camDir.z;

    // Translation by -camPos.
    mat4 T = mat4::identity();
    T[0][3] = -camPos.x;
    T[1][3] = -camPos.y;
    T[2][3] = -camPos.z;
    return R*T;
}
// Perspective projection: transforms the view frustum bounded by [l,r]x[b,t]
// on the near plane (distance n) and the far plane (distance f) into the
// bi-unit cube. Derivation of the canonical form:
// http://www.songho.ca/opengl/gl_projectionmatrix.html
// NOTE(review): relative to the canonical OpenGL matrix, rows 0, 1 and 3 are
// negated here (P[3][2] = +1, so the clip-space w is +z_view rather than
// -z_view) and depth comes out reversed (z_view = -n maps to +1, z_view = -f
// maps to -1). This looks deliberate for this renderer's screen/z-buffer
// conventions — confirm against the rasterizer before "fixing" any signs.
// `inline` so this header can be included from multiple translation units
// without multiple-definition link errors.
inline mat4 projection(float l, float r, float b, float t, float n, float f)
{
    mat4 P = mat4::identity();
    P[0][0] = (-2.0f*n)/(r-l);
    P[0][2] = -(r+l)/(r-l);
    P[1][1] = (-2.0f*n)/(t-b);
    P[1][2] = -(t+b)/(t-b);
    P[2][2] = -(f+n)/(f-n);
    P[2][3] = (-2.0f*n*f)/(f-n);
    P[3][2] = 1;  // w' = +z_view (canonical GL uses -1 here)
    P[3][3] = 0;
    return P;
}
// Convenience overload: builds a symmetric frustum from a vertical field of
// view (in degrees), an aspect ratio (width/height), and the near/far plane
// distances, then forwards to the general projection() above.
// `inline` so this header can be included from multiple translation units
// without multiple-definition link errors.
inline mat4 projection(float v_fov, float aspectRatio, float front, float back){
    float theta = v_fov*DEG2RAD;        // vertical fov in radians
    float tangent = tanf(theta/2);      // tangent of half the vertical fov
    float height = front * tangent;     // half height of the near plane
    float width = height * aspectRatio; // half width of the near plane
    return projection(-width, width, -height, height, front, back);
}