From d6dad098dd262179d65214707976050af24c8ca0 Mon Sep 17 00:00:00 2001 From: Trent Houliston Date: Fri, 5 Jul 2019 11:22:02 +1000 Subject: [PATCH] Commit initial code for the visual mesh view rays --- .../components/visual_mesh/camera/model.ts | 20 ++++- .../visual_mesh/camera/shaders/mesh.frag | 13 ++-- .../visual_mesh/camera/shaders/mesh.vert | 75 ++++++++++++++++--- .../visual_mesh/camera/view_model.ts | 71 +++++++++--------- src/client/components/visual_mesh/network.ts | 25 ++++--- 5 files changed, 141 insertions(+), 63 deletions(-) diff --git a/src/client/components/visual_mesh/camera/model.ts b/src/client/components/visual_mesh/camera/model.ts index 038e478d..5ad0dc1d 100644 --- a/src/client/components/visual_mesh/camera/model.ts +++ b/src/client/components/visual_mesh/camera/model.ts @@ -1,14 +1,26 @@ import { observable } from 'mobx' import { Image } from '../../../image_decoder/image_decoder' +import { Matrix4 } from '../../../math/matrix4' +import { Vector2 } from '../../../math/vector2' import { VisualMeshRobotModel } from '../model' export interface VisualMesh { - readonly rows: number[] - readonly indices: number[] readonly neighbours: number[] - readonly coordinates: number[] + readonly rays: number[] readonly classifications: { dim: number, values: number[] } + readonly k: number + readonly r: number + readonly h: number +} + +export interface VisionImage extends Image { + readonly Hcw: Matrix4 + readonly lens: { + readonly projection: number + readonly focalLength: number + readonly centre: Vector2 + } } type CameraModelOpts = { @@ -20,7 +32,7 @@ export class CameraModel { readonly id: number @observable.shallow mesh?: VisualMesh - @observable.shallow image?: Image + @observable.shallow image?: VisionImage @observable name: string constructor(private model: VisualMeshRobotModel, { id, name }: CameraModelOpts) { diff --git a/src/client/components/visual_mesh/camera/shaders/mesh.frag b/src/client/components/visual_mesh/camera/shaders/mesh.frag index 
db13ce41..c91005db 100644 --- a/src/client/components/visual_mesh/camera/shaders/mesh.frag +++ b/src/client/components/visual_mesh/camera/shaders/mesh.frag @@ -2,7 +2,6 @@ precision lowp float; precision lowp int; uniform sampler2D image; -uniform vec2 dimensions; varying float vBall; varying float vGoal; @@ -13,13 +12,15 @@ varying float vEnvironment; varying vec2 vUv; void main() { - vec4 imgColour = texture2D(image, vUv / dimensions); + vec4 imgColour = texture2D(image, vUv); + // clang-format off vec3 classColour = vec3(1.0, 0.0, 0.0) * vBall - + vec3(1.0, 1.0, 0.0) * vGoal - + vec3(0.0, 0.0, 1.0) * vFieldLine - + vec3(0.0, 1.0, 0.0) * vField - + vec3(0.0, 0.0, 0.0) * vEnvironment; + + vec3(1.0, 1.0, 0.0) * vGoal + + vec3(0.0, 0.0, 1.0) * vFieldLine + + vec3(0.0, 1.0, 0.0) * vField + + vec3(0.0, 0.0, 0.0) * vEnvironment; + // clang-format on gl_FragColor = vec4(imgColour.xyz * 0.5 + classColour * 0.5, 1); } diff --git a/src/client/components/visual_mesh/camera/shaders/mesh.vert b/src/client/components/visual_mesh/camera/shaders/mesh.vert index 9f9de5ca..6919717a 100644 --- a/src/client/components/visual_mesh/camera/shaders/mesh.vert +++ b/src/client/components/visual_mesh/camera/shaders/mesh.vert @@ -3,14 +3,21 @@ precision lowp int; uniform mat4 projectionMatrix; uniform mat4 modelViewMatrix; +uniform vec2 dimensions; +uniform mat4 Hcw; +uniform float focalLength; +uniform vec2 centre; +uniform int projection; +uniform float k; +uniform float r; +uniform float h; -attribute vec2 position; +attribute vec3 position; attribute float ball; attribute float goal; attribute float field; attribute float fieldLine; attribute float environment; -attribute vec2 uv; varying float vBall; varying float vGoal; @@ -21,20 +28,70 @@ varying vec2 vUv; #define M_PI 3.1415926535897932384626433832795 +#define RECTILINEAR_PROJECTION 1 +#define EQUIDISTANT_PROJECTION 2 +#define EQUISOLID_PROJECTION 3 + +// TODO(trent) these should be moved into a separate GLSL file once there is a 
decent #include system +vec2 projectEquidistant(vec3 ray, float f, vec2 c) { + // Calculate some intermediates + float theta = acos(ray.x); + float r = f * theta; + float rSinTheta = 1.0 / sqrt(1.0 - ray.x * ray.x); + + // Work out our pixel coordinates as a 0 centred image with x to the left and y up (screen space) + vec2 screen = ray.x >= 1.0 ? vec2(0) : vec2(r * ray.y * rSinTheta, r * ray.z * rSinTheta); + + // Then apply the offset to the centre of our lens + return screen - c; +} + +vec2 projectEquisolid(vec3 ray, float f, vec2 c) { + // Calculate some intermediates + float theta = acos(ray.x); + float r = 2.0 * f * sin(theta * 0.5); + float rSinTheta = 1.0 / sqrt(1.0 - ray.x * ray.x); + + // Work out our pixel coordinates as a 0 centred image with x to the left and y up (screen space) + vec2 screen = ray.x >= 1.0 ? vec2(0) : vec2(r * ray.y * rSinTheta, r * ray.z * rSinTheta); + + // Then apply the offset to the centre of our lens + return screen - c; +} + +vec2 projectRectilinear(vec3 ray, float f, vec2 c) { + float rx = 1.0 / ray.x; + return vec2(f * ray.y * rx, f * ray.z * rx) - c; +} + +vec2 project(vec3 ray, float f, vec2 c, int projection) { + if (projection == RECTILINEAR_PROJECTION) return projectRectilinear(ray, f, c); + if (projection == EQUIDISTANT_PROJECTION) return projectEquidistant(ray, f, c); + if (projection == EQUISOLID_PROJECTION) return projectEquisolid(ray, f, c); + return vec2(0); +} + void main() { + // Rotate vector into camera space and project into image space + // Correct for OpenGL coordinate system and aspect ratio + vec2 uv = project((Hcw * vec4(position, 0)).xyz, focalLength * dimensions.x, centre, projection) + * vec2(-1.0, dimensions.x / dimensions.y); + // Forward our varyings - vUv = uv; + vUv = (uv + 1.0) * 0.5; // Classifications - vBall = ball; - vGoal = goal; - vFieldLine = fieldLine; - vField = field; + vBall = ball; + vGoal = goal; + vFieldLine = fieldLine; + vField = field; vEnvironment = environment; // Calculate our
position in the mesh - float theta = M_PI * 2.0 * position.y; - vec2 pos = vec2(cos(theta) * position.x, sin(theta) * position.x); + float theta = atan(position.y, position.x); + float phi = acos(position.z); + float n = k * (log(abs(tan((M_PI - 2.0 * phi) * 0.25))) / log(1.0 - 2.0 * r / h)); + vec2 pos = vec2(cos(theta), sin(theta)) * n; gl_Position = projectionMatrix * modelViewMatrix * vec4(pos, 0.0, 1.0); } diff --git a/src/client/components/visual_mesh/camera/view_model.ts b/src/client/components/visual_mesh/camera/view_model.ts index 5ddffcb4..e4e30777 100644 --- a/src/client/components/visual_mesh/camera/view_model.ts +++ b/src/client/components/visual_mesh/camera/view_model.ts @@ -1,20 +1,22 @@ import * as bounds from 'binary-search-bounds' -import { autorun } from 'mobx' import { observable } from 'mobx' import { computed } from 'mobx' +import { autorun } from 'mobx' import { createTransformer } from 'mobx-utils' -import { InterleavedBuffer, InterleavedBufferAttribute, Object3D } from 'three' +import { InterleavedBuffer, InterleavedBufferAttribute, Matrix4, Object3D, Vector3 } from 'three' import { RawShaderMaterial } from 'three' -import { Float32BufferAttribute } from 'three' +import { Vector2 } from 'three' import { Scene } from 'three' import { WebGLRenderer } from 'three' import { Mesh } from 'three' import { BufferGeometry } from 'three' import { Camera } from 'three' import { OrthographicCamera } from 'three' -import { Vector2 } from 'three' +import { Float32BufferAttribute } from 'three' import { ImageDecoder } from '../../../image_decoder/image_decoder' +import { Matrix4 as Matrix4Model } from '../../../math/matrix4' +import { Vector3 as Vector3Model } from '../../../math/vector3' import { CameraModel } from './model' import { VisualMesh } from './model' @@ -83,8 +85,18 @@ export class CameraViewModel { private visualMesh = createTransformer((mesh: VisualMesh): Object3D => { const meshMaterial = this.meshMaterial - meshMaterial.uniforms.image.value
= this.decoder.texture - meshMaterial.uniforms.dimensions.value = new Vector2(this.model.image!.width, this.model.image!.height) + const { centre, focalLength, projection } = this.model.image!.lens + meshMaterial.uniforms = { + image: { value: this.decoder.texture }, + dimensions: { value: new Vector2(this.model.image!.width, this.model.image!.height) }, + Hcw: { value: this.model.image ? toThreeMatrix4(this.model.image.Hcw) : new Matrix4() }, + focalLength: { value: focalLength }, + centre: { value: new Vector2(centre.x, centre.y) }, + projection: { value: projection }, + k: { value: mesh.k }, + r: { value: mesh.r }, + h: { value: mesh.h }, + } // The UV mapped mesh const m = new Mesh(this.meshGeometry(mesh), meshMaterial) @@ -100,37 +112,15 @@ export class CameraViewModel { return new RawShaderMaterial({ vertexShader: meshVertexShader, fragmentShader: meshFragmentShader, - uniforms: { - image: { type: 't' }, - dimensions: { value: new Vector2() }, - }, }) } private meshGeometry = createTransformer((mesh: VisualMesh): BufferGeometry => { - const { rows, indices, neighbours, coordinates, classifications } = mesh - - const nElem = coordinates.length / 2 - - // Cumulative sum so we can work out which row our segments are on - const cRows = rows.reduce((acc, v, i) => { - acc.push(acc[i] + v) - return acc - }, [0]) - - // Calculate our position - const position = ([] as number[]).concat(...indices.map(i => { - // Which ring we are on as a value between 0 and 1 - const idx = bounds.le(cRows, i) - const phi = idx / rows.length - // How far around the ring we are as a value between 0 and 1 - const theta = (i - cRows[idx]) / rows[idx] - return [phi, theta] - })) - + const { neighbours, rays, classifications } = mesh // Calculate our triangle indexes + const nElem = rays.length / 3 const triangles = [] for (let i = 0; i < nElem; i++) { const ni = i * 6 @@ -144,13 +134,11 @@ export class CameraViewModel { } } - // Calculate our uv for mapping images - const uvs = coordinates 
- const geometry = new BufferGeometry() geometry.setIndex(triangles) - geometry.addAttribute('position', new Float32BufferAttribute(position, 2)) - geometry.addAttribute('uv', new Float32BufferAttribute(uvs, 2)) + geometry.addAttribute('position', new Float32BufferAttribute(rays, 3)) + + // TODO need Hcw and lens parameters // Read each class into a separate attribute const buffer = new InterleavedBuffer( @@ -169,3 +157,16 @@ export class CameraViewModel { }, (geometry?: BufferGeometry) => geometry && geometry.dispose()) } + +function toThreeMatrix4(mat4: Matrix4Model): Matrix4 { + return new Matrix4().set( + mat4.x.x, mat4.x.y, mat4.x.z, mat4.x.t, + mat4.y.x, mat4.y.y, mat4.y.z, mat4.y.t, + mat4.z.x, mat4.z.y, mat4.z.z, mat4.z.t, + mat4.t.x, mat4.t.y, mat4.t.z, mat4.t.t, + ) +} + +function toThreeVector3(vec3: Vector3Model): Vector3 { + return new Vector3(vec3.x, vec3.y, vec3.z) +} diff --git a/src/client/components/visual_mesh/network.ts b/src/client/components/visual_mesh/network.ts index af1a9513..e4ad0511 100644 --- a/src/client/components/visual_mesh/network.ts +++ b/src/client/components/visual_mesh/network.ts @@ -1,6 +1,8 @@ import { action } from 'mobx' import { message } from '../../../shared/proto/messages' +import { Matrix4 } from '../../math/matrix4' +import { Vector2 } from '../../math/vector2' import { Network } from '../../network/network' import { NUsightNetwork } from '../../network/nusight_network' import { RobotModel } from '../robot/model' @@ -30,7 +32,7 @@ export class VisualMeshNetwork { @action private onVisualMesh = (robotModel: RobotModel, packet: VisualMesh) => { const robot = VisualMeshRobotModel.of(robotModel) - const { cameraId, mesh, indices, neighbourhood, coordinates, classifications } = packet + const { cameraId, neighbourhood, rays, classifications, k, r, h } = packet let camera = robot.cameras.get(cameraId) if (!camera) { @@ -43,11 +45,12 @@ export class VisualMeshNetwork { // We don't need to know phi, just how many items are in 
each ring camera.mesh = { - rows: mesh.map(v => v.segments!), - indices, neighbours: neighbourhood!.v!, - coordinates: coordinates!.v!, - classifications: { dim: classifications!.rows!, values: classifications!.v! }, + rays: rays!.v!, + classifications: { dim: classifications!.cols!, values: classifications!.v! }, + k: k!, + r: r!, + h: h!, } } @@ -55,13 +58,11 @@ export class VisualMeshNetwork { private onImage = (robotModel: RobotModel, image: Image | CompressedImage) => { const robot = VisualMeshRobotModel.of(robotModel) const { cameraId, name, dimensions, format, data, Hcw } = image + const { projection, focalLength, centre } = image!.lens! let camera = robot.cameras.get(cameraId) if (!camera) { - camera = CameraModel.of(robot, { - id: cameraId, - name, - }) + camera = CameraModel.of(robot, { id: cameraId, name }) robot.cameras.set(cameraId, camera) } camera.image = { @@ -69,6 +70,12 @@ export class VisualMeshNetwork { height: dimensions!.y!, format, data, + lens: { + projection: projection || 0, + focalLength: focalLength! / dimensions!.x!, + centre: Vector2.from(centre), + }, + Hcw: Matrix4.from(Hcw), } camera.name = name }