10 changes: 10 additions & 0 deletions apps/typegpu-docs/src/content/docs/fundamentals/utils.mdx
@@ -118,6 +118,16 @@ The default workgroup sizes are:
The callback is not called if the global invocation id of a thread would exceed the size in any dimension.
:::
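
For example, here is a minimal sketch of the guard in action (the `data` buffer is hypothetical and assumed to be a mutable array created elsewhere, e.g. with `root.createMutable`):

```ts
const doubleUp = root.createGuardedComputePipeline((x) => {
  'use gpu';
  // `data` is a hypothetical mutable buffer holding 100 u32 values.
  data.value[x] = data.value[x] * 2;
});

// Exactly 100 threads run the callback; the threads that merely pad out
// the last workgroup fail the bounds check and return early.
doubleUp.dispatchThreads(100);
```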

:::tip
`TgpuGuardedComputePipeline` provides getters for the underlying pipeline and the size buffer.
These are useful with `tgpu.resolve`, since a guarded pipeline cannot be resolved directly.

```ts
const innerPipeline = doubleUpPipeline.with(bindGroup1).pipeline;
tgpu.resolve({ externals: { innerPipeline } });
```
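
The size buffer is available the same way; a minimal access sketch (the variable name is ours):

```ts
// Holds the thread count written by `dispatchThreads`;
// for fewer than three dimensions the remaining coordinates are 1.
const threadCount = doubleUpPipeline.sizeUniform;
```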
:::

## *console.log*

Yes, you read that correctly, TypeGPU implements logging to the console on the GPU!
8 changes: 8 additions & 0 deletions packages/typegpu/src/core/root/init.ts
@@ -187,6 +187,14 @@ export class TgpuGuardedComputePipelineImpl<TArgs extends number[]>
// Yeah, I know we flush here... but it's only a matter of time!
this.#root.flush();
}

get pipeline() {
return this.#pipeline;
}

get sizeUniform() {
return this.#sizeUniform;
}
}

class WithBindingImpl implements WithBinding {
12 changes: 12 additions & 0 deletions packages/typegpu/src/core/root/rootTypes.ts
@@ -13,6 +13,7 @@ import type {
AnyWgslData,
U16,
U32,
Vec3u,
Void,
WgslArray,
} from '../../data/wgslTypes.ts';
@@ -94,6 +95,17 @@ export interface TgpuGuardedComputePipeline<TArgs extends number[] = number[]> {
* "guarded" by a bounds check.
*/
dispatchThreads(...args: TArgs): void;

/**
* The underlying pipeline used during `dispatchThreads`.
*/
pipeline: TgpuComputePipeline;

/**
* The buffer used to automatically pass the thread count to the underlying pipeline during `dispatchThreads`.
* For pipelines with fewer than three dimensions, the remaining coordinates are expected to be 1.
*/
sizeUniform: TgpuUniform<Vec3u>;
}

export interface WithCompute {
140 changes: 140 additions & 0 deletions packages/typegpu/tests/pipeline-resolution.test.ts
@@ -0,0 +1,140 @@
import { describe, expect } from 'vitest';
import * as d from '../src/data/index.ts';
import tgpu from '../src/index.ts';
import { it } from './utils/extendedIt.ts';

describe('resolve', () => {
const Boid = d.struct({
position: d.vec2f,
color: d.vec4f,
});

const computeFn = tgpu['~unstable'].computeFn({
workgroupSize: [1, 1, 1],
in: { gid: d.builtin.globalInvocationId },
})(() => {
const myBoid = Boid({
position: d.vec2f(0, 0),
color: d.vec4f(1, 0, 0, 1),
});
});

const vertexFn = tgpu['~unstable'].vertexFn({
out: { pos: d.builtin.position, color: d.vec4f },
})(() => {
const myBoid = Boid();
return { pos: d.vec4f(myBoid.position, 0, 1), color: myBoid.color };
});

const fragmentFn = tgpu['~unstable'].fragmentFn({
in: { color: d.vec4f },
out: d.vec4f,
})((input) => {
return input.color;
});

it('can resolve a render pipeline', ({ root }) => {
const pipeline = root
.withVertex(vertexFn, {})
.withFragment(fragmentFn, { format: 'rgba8unorm' })
.createPipeline();

expect(tgpu.resolve({ externals: { pipeline } })).toMatchInlineSnapshot(`
"struct Boid_1 {
position: vec2f,
color: vec4f,
}

struct vertexFn_Output_2 {
@builtin(position) pos: vec4f,
@location(0) color: vec4f,
}

@vertex fn vertexFn_0() -> vertexFn_Output_2 {
var myBoid = Boid_1();
return vertexFn_Output_2(vec4f(myBoid.position, 0, 1), myBoid.color);
}

struct fragmentFn_Input_4 {
@location(0) color: vec4f,
}

@fragment fn fragmentFn_3(input: fragmentFn_Input_4) -> @location(0) vec4f {
return input.color;
}"
`);
});

it('can resolve a compute pipeline', ({ root }) => {
const pipeline = root
.withCompute(computeFn)
.createPipeline();

expect(tgpu.resolve({ externals: { pipeline } })).toMatchInlineSnapshot(`
"struct Boid_1 {
position: vec2f,
color: vec4f,
}

struct computeFn_Input_2 {
@builtin(global_invocation_id) gid: vec3u,
}

@compute @workgroup_size(1, 1, 1) fn computeFn_0(_arg_0: computeFn_Input_2) {
var myBoid = Boid_1(vec2f(), vec4f(1, 0, 0, 1));
}"
`);
});

it('can resolve a guarded compute pipeline', ({ root }) => {
const pipelineGuard = root.createGuardedComputePipeline((x, y, z) => {
'use gpu';
const myBoid = Boid({
position: d.vec2f(0, 0),
color: d.vec4f(x, y, z, 1),
});
});

expect(tgpu.resolve({ externals: { pipeline: pipelineGuard.pipeline } }))
.toMatchInlineSnapshot(`
"@group(0) @binding(0) var<uniform> sizeUniform_1: vec3u;

struct Boid_3 {
position: vec2f,
color: vec4f,
}

fn wrappedCallback_2(x: u32, y: u32, z: u32) {
var myBoid = Boid_3(vec2f(), vec4f(f32(x), f32(y), f32(z), 1));
}

struct mainCompute_Input_4 {
@builtin(global_invocation_id) id: vec3u,
}

@compute @workgroup_size(8, 8, 4) fn mainCompute_0(in: mainCompute_Input_4) {
if (any(in.id >= sizeUniform_1)) {
return;
}
wrappedCallback_2(in.id.x, in.id.y, in.id.z);
}"
`);
});

it('throws when resolving multiple pipelines', ({ root }) => {
const renderPipeline = root
.withVertex(vertexFn, {})
.withFragment(fragmentFn, { format: 'rgba8unorm' })
.createPipeline();

const computePipeline = root
.withCompute(computeFn)
.createPipeline();

expect(() =>
tgpu.resolve({ externals: { renderPipeline, computePipeline } })
).toThrowErrorMatchingInlineSnapshot(
`[Error: Found 2 pipelines but can only resolve one at a time.]`,
);
});
});