Skip to content
This repository was archived by the owner on Jul 3, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/client/components/vision/camera/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ export interface GreenHorizon {

export interface VisualMesh {
readonly neighbours: number[]
readonly coordinates: number[]
readonly rays: number[]
readonly classifications: { dim: number, values: number[] }
}

Expand Down
69 changes: 58 additions & 11 deletions src/client/components/vision/camera/shaders/mesh.vert
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,13 @@ precision lowp int;

uniform mat4 projectionMatrix;
uniform mat4 modelViewMatrix;
uniform vec2 dimensions;
uniform vec2 viewSize;
uniform mat4 Hcw;
uniform float focalLength;
uniform vec2 centre;
uniform int projection;

attribute vec2 position;
attribute vec3 position;

attribute float ball;
attribute float goal;
Expand All @@ -19,17 +23,60 @@ varying float vFieldLine;
varying float vField;
varying float vEnvironment;

void main() {
// vClassification = classification;
#define RECTILINEAR_PROJECTION 1
#define EQUIDISTANT_PROJECTION 2
#define EQUISOLID_PROJECTION 3

// TODO(trent) these should be moved into a separate GLSL file once there is a decent #include system
// Project a camera-space unit ray onto the image using an equidistant (f-theta)
// fisheye model: radial distance from the optical axis is r = f * theta.
//   ray: unit direction in camera space; ray.x is along the optical axis
//        (theta = acos(ray.x)), ray.y/ray.z map to screen x (left) / y (up).
//   f:   focal length; assumed pre-scaled by the caller to the target image
//        coordinate range — TODO confirm against main().
//   c:   offset of the lens centre from the image centre, subtracted at the end.
vec2 projectEquidistant(vec3 ray, float f, vec2 c) {
  // Calculate some intermediates
  float theta = acos(ray.x);
  float r = f * theta;
  // 1/sin(theta); diverges as ray.x -> 1 (ray on the optical axis), but that
  // case is masked out by the ternary below before the value is used
  float rSinTheta = 1.0 / sqrt(1.0 - ray.x * ray.x);

  // Work out our pixel coordinates as a 0 centred image with x to the left and y up (screen space)
  // NOTE(review): only ray.x >= 1.0 is guarded; ray.x == -1.0 would also make
  // rSinTheta non-finite — presumably backward-axis rays never reach here; confirm.
  vec2 screen = ray.x >= 1.0 ? vec2(0) : vec2(r * ray.y * rSinTheta, r * ray.z * rSinTheta);

  // Then apply the offset to the centre of our lens
  return screen - c;
}

vec2 projectEquisolid(vec3 ray, float f, vec2 c) {
// Calculate some intermediates
float theta = acos(ray.x);
float r = 2.0 * f * sin(theta * 0.5);
float rSinTheta = 1.0 / sqrt(1.0 - ray.x * ray.x);

// Work out our pixel coordinates as a 0 centred image with x to the left and y up (screen space)
vec2 screen = ray.x >= 1.0 ? vec2(0) : vec2(r * ray.y * rSinTheta, r * ray.z * rSinTheta);

// Calculate our position in the mesh
vec2 pos = 2.0 * ((position / dimensions) - 0.5);
pos.y *= -1.0;
// Then apply the offset to the centre of our lens
return screen - c;
}

// Project a camera-space unit ray using a rectilinear (pinhole/perspective)
// model: perspective-divide the y/z components by the distance along the
// optical axis (ray.x), scale by the focal length, then apply the lens
// centre offset.
// NOTE(review): no guard for ray.x <= 0 (rays at or behind the image plane),
// unlike the fisheye projections above — presumably only forward-facing rays
// are rendered with this model; confirm.
vec2 projectRectilinear(vec3 ray, float f, vec2 c) {
  float rx = 1.0 / ray.x;
  return vec2(f * ray.y * rx, f * ray.z * rx) - c;
}

// Dispatch to the lens projection model selected by the `projection` id
// (see the RECTILINEAR/EQUIDISTANT/EQUISOLID #defines above).
// An unrecognised id falls through to vec2(0) — the image origin — rather
// than producing undefined output.
vec2 project(vec3 ray, float f, vec2 c, int projection) {
  if (projection == RECTILINEAR_PROJECTION) return projectRectilinear(ray, f, c);
  if (projection == EQUIDISTANT_PROJECTION) return projectEquidistant(ray, f, c);
  if (projection == EQUISOLID_PROJECTION) return projectEquisolid(ray, f, c);
  return vec2(0);
}

void main() {
// Rotate vector into camera space and project into image space
// Correct for OpenGL coordinate system and aspect ratio
// Focal length is * 2 since the width of the "image" is -1 to 1 (width of 2.0)
vec2 pos = project((Hcw * vec4(position, 0)).xyz, 2.0 * focalLength, centre, projection)
* vec2(-1.0, viewSize.x / viewSize.y);

vBall = ball;
vGoal = goal;
vFieldLine = fieldLine;
vField = field;
vBall = ball;
vGoal = goal;
vFieldLine = fieldLine;
vField = field;
vEnvironment = environment;

gl_Position = projectionMatrix * modelViewMatrix * vec4(pos, 0.0, 1.0);
Expand Down
29 changes: 11 additions & 18 deletions src/client/components/vision/camera/view_model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -179,21 +179,10 @@ export class CameraViewModel {

private meshGeometry = createTransformer((mesh: VisualMesh): BufferGeometry => {

const { neighbours, coordinates, classifications } = mesh
const { neighbours, rays, classifications } = mesh

// Calculate our triangle indexes
const nElem = mesh.coordinates.length / 2
// const triangles: number[] = []
// for (let i = 0; i < nElem; ++i) {
// const idx = i * 6
// for (let j = 0; j < 6; ++j) {
// const nIdx = idx + j
// if (neighbours[nIdx] < nElem) {
// triangles.push(i, neighbours[nIdx])
// }
// }
// }
// Calculate our triangle indexes
const nElem = mesh.rays.length / 3
const triangles = []
for (let i = 0; i < nElem; i++) {
const ni = i * 6
Expand All @@ -209,7 +198,7 @@ export class CameraViewModel {

const geometry = new BufferGeometry()
geometry.setIndex(triangles)
geometry.addAttribute('position', new Float32BufferAttribute(coordinates, 2))
geometry.addAttribute('position', new Float32BufferAttribute(rays, 3))

// Read each class into a separate attribute
const buffer = new InterleavedBuffer(
Expand All @@ -235,15 +224,19 @@ export class CameraViewModel {
depthTest: false,
depthWrite: false,
transparent: true,
uniforms: {
dimensions: { value: new Vector2() },
},
})
}

private visualmesh = createTransformer((mesh: VisualMesh) => {
const material = this.meshMaterial.clone()
material.uniforms.dimensions.value = new Vector2(this.model.image!.width, this.model.image!.height)
const { centre, focalLength, projection } = this.model.image!.lens
material.uniforms = {
Hcw: { value: this.model.image ? toThreeMatrix4(this.model.image.Hcw) : new Matrix4() },
viewSize: { value: new Vector2(this.viewWidth, this.viewHeight) },
focalLength: { value: focalLength },
centre: { value: new Vector2(centre.x, centre.y) },
projection: { value: projection },
}
const lines = new Mesh(this.meshGeometry(mesh), material)
lines.frustumCulled = false
return lines
Expand Down
4 changes: 2 additions & 2 deletions src/client/components/vision/network.ts
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ export class VisionNetwork {
@action
private onMesh(robotModel: RobotModel, packet: VisualMesh) {
const robot = VisionRobotModel.of(robotModel)
const { cameraId, neighbourhood, coordinates, classifications } = packet
const { cameraId, neighbourhood, rays, classifications } = packet

let camera = robot.cameras.get(cameraId)
if (!camera) {
Expand All @@ -80,7 +80,7 @@ export class VisionNetwork {
// We don't need to know phi, just how many items are in each ring
camera.visualmesh = {
neighbours: neighbourhood!.v!,
coordinates: coordinates!.v!,
rays: rays!.v!,
classifications: { dim: classifications!.rows!, values: classifications!.v! },
}
}
Expand Down