hikari – New Material System

The next step in building hikari’s terrain system is proper material handling. In order to do so, I need to completely revamp the material system. This is going to be extensive enough to earn its own post.

Current Material System:

Here is the current state of hikari’s material “system”, a single struct:

struct Material {
    GLuint normal_map;
    GLuint albedo_map;
    GLuint metallic_map;
    GLuint roughness_map;
    GLuint ao_map;

    bool has_alpha;
    bool is_leaf;
    bool is_terrain;
};

And its usage, from the per-mesh main render loop:

main_pass.Begin();
main_pass.SetRenderTarget(GL_FRAMEBUFFER, &hdr_buffer_rt_ms);

RenderMesh * rmesh = meshes + i;
IndexedMesh * mesh = rmesh->mesh;
Material * material = rmesh->material;
if (material->has_alpha) {
    glDepthMask(GL_TRUE);
    glDepthFunc(GL_LESS);
}
else {
    glDepthMask(GL_FALSE);
    glDepthFunc(GL_EQUAL);
}
Assert(mesh && material, "Cannot render without both mesh and material!");

GLuint shader_for_mat = default_shader;
if (material->is_leaf) {
    shader_for_mat = leaf_shader;
}
if (material->is_terrain) {
    shader_for_mat = terrain_shader;
}
main_pass.SetShader(shader_for_mat);

main_pass.BindTexture(GL_TEXTURE_2D, "normal_map", material->normal_map);
main_pass.BindTexture(GL_TEXTURE_2D, "albedo_map", material->albedo_map);
main_pass.BindTexture(GL_TEXTURE_2D, "metallic_map", material->metallic_map);
main_pass.BindTexture(GL_TEXTURE_2D, "roughness_map", material->roughness_map);
main_pass.BindTexture(GL_TEXTURE_2D, "ao_map", material->ao_map);

// Material independent shader setup (lights list, shadow map binding,
// environment maps, etc.), elided for space.

// Draw the mesh.
mesh->SetAttribPointersForShader(main_pass.active_shader);
mesh->Draw();

main_pass.End();

The current notion of material is a set of textures and some flags used to determine the correct shader. Needless to say, this is neither flexible nor particularly efficient. Instead, what I’d like is a Material that consists of two bags of data: render states (shader program, blend mode, depth test) and shader uniforms (textures and other shader parameters).

New Material System, First Pass:

struct Material {
    GLuint shader_program;
    GLenum depth_mask;
    GLenum depth_func;

    struct Uniform {
        const char * name;
        union {
            float value_f[4];
            Texture value_texture;
        };

        enum UniformType {
            Float1,
            Float2,
            Float3,
            Float4,

            TexHandle,
        } type;
    };

    std::vector<Uniform> uniforms;

    // Uniform setup methods elided

    inline void
    BindForRenderPass(RenderPass * pass) {
        pass->SetShader(shader_program);
        glDepthMask(depth_mask);
        glDepthFunc(depth_func);

        for (u32 i = 0; i < uniforms.size(); ++i) {
            Uniform u = uniforms[i];
            switch (u.type) {
                case Uniform::UniformType::Float1: {
                    pass->SetUniform1f(u.name, u.value_f[0]);
                } break;
                case Uniform::UniformType::Float2: {
                    pass->SetUniform2f(u.name, u.value_f[0], u.value_f[1]);
                } break;
                case Uniform::UniformType::Float3: {
                    pass->SetUniform3f(u.name, u.value_f[0], u.value_f[1], u.value_f[2]);
                } break;
                case Uniform::UniformType::Float4: {
                    pass->SetUniform4f(u.name, u.value_f[0], u.value_f[1], u.value_f[2], u.value_f[3]);
                } break;
                case Uniform::UniformType::TexHandle: {
                    pass->BindTexture(u.name, u.value_texture);
                } break;
            }
        }
    }
};

A good first step: no more branching to select shaders inside the render loop, and no hardcoded set of textures. It still does some silly things, though, like re-binding the shader for every mesh, and it still looks up uniform locations every time, even though there is now enough information to cache those at load time.
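
For reference, that caching could be as simple as resolving each uniform name once after the shader program is linked. A sketch (it assumes a cached_location field added to Uniform; this is not what hikari currently does):

// Sketch: resolve uniform locations once at material load time.
// Assumes Uniform gains a `GLint cached_location` member.
void
CacheUniformLocations(Material * material) {
    for (u32 i = 0; i < material->uniforms.size(); ++i) {
        Material::Uniform * u = &material->uniforms[i];
        u->cached_location = glGetUniformLocation(material->shader_program, u->name);
    }
}

BindForRenderPass() could then call the glUniform* functions with the cached locations directly, instead of passing names through the RenderPass helpers every frame.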

Let’s look at the entire main pass for a bit:

for (u32 i = 0; i < mesh_count; ++i) {
    main_pass.Begin();
    main_pass.SetRenderTarget(GL_FRAMEBUFFER, &hdr_buffer_rt_ms);

    if (!visible_meshes[i]) {
        continue;
    }

    RenderMesh * rmesh = meshes + i;
    IndexedMesh * mesh = rmesh->mesh;
    Material * material = rmesh->material;

    Assert(mesh && material, "Cannot render without both mesh and material!");

    material->BindForRenderPass(&main_pass);

    main_pass.SetUniformMatrix44("clip_from_world", clip_from_world);
    main_pass.SetUniformMatrix44("world_from_local", rmesh->world_from_local);
    main_pass.SetUniform3f("view_pos", cam->position);
    main_pass.SetUniform1f("time", current_time_sec);

    main_pass.BindUBO("PointLightDataBlock", point_light_data_ubo);
    main_pass.BindUBO("SpotLightDataBlock", spot_light_data_ubo);
    main_pass.BindUBO("LightList", light_list_ubo);

    main_pass.BindTexture(GL_TEXTURE_CUBE_MAP, "irradiance_map", skybox_cubemaps.irradiance_map);
    main_pass.BindTexture(GL_TEXTURE_CUBE_MAP, "prefilter_map", skybox_cubemaps.prefilter_map);
    main_pass.BindTexture(GL_TEXTURE_2D, "brdf_lut", brdf_lut_texture);

    main_pass.BindTexture("ssao_map", ssao_blur_texture);
    main_pass.SetUniform2f("viewport_dim", hdr_buffer_rt_ms.viewport.z, hdr_buffer_rt_ms.viewport.w);

    main_pass.SetUniform3f("sun_direction", world->sun_direction);
    main_pass.SetUniform3f("sun_color", world->sun_color);
    main_pass.BindTexture("sun_shadow_map", sun_light.cascade.shadow_map);
    for (u32 split = 0; split < NUM_SPLITS; ++split) {
        char buffer[256];
        _snprintf_s(buffer, sizeof(buffer) - 1, "sun_clip_from_world[%d]", split);
        main_pass.SetUniformMatrix44(buffer, sun_light.cascade.sun_clip_from_world[split]);
    }
    mesh->SetAttribPointersForShader(main_pass.active_shader);
    mesh->Draw();
    main_pass.End();
}

There is still a *lot* of uniform setup going on per-mesh, and almost all of it is unnecessary. But, since Material binds the shader each time, all of the other uniforms need to be rebound (because BindForRenderPass() may have bound a different shader).

Ideally, here’s the inner loop we’re aiming for:

for (u32 i = 0; i < mesh_count; ++i) {
    RenderMesh * rmesh = meshes + i;
    IndexedMesh * mesh = rmesh->mesh;
    Material * material = rmesh->material;
    Assert(mesh && material, "Cannot render without both mesh and material!");

    material->BindForRenderPass(&main_pass);
    main_pass.SetUniformMatrix44("world_from_local", rmesh->world_from_local);
    mesh->SetAttribPointersForShader(main_pass.active_shader);
    mesh->Draw();
}

Material Instances:

When rendering the Sponza test scene, there are a few dozen materials loaded. However, there are only 3 different sets of render states: leaves (a shader doing alpha test and subsurface scattering), alpha tested, and default PBR. Within each class of material the only difference is the texture set.

If we were to separate the set of render states and the set of uniforms into different entities, we’d be able to minimize modifications to the render state in this loop. So that’s what I’ve decided to do.

A Material is a bag of render states, while a MaterialInstance is a bag of uniforms associated with a particular Material. For example, the vases, walls, columns, etc. in Sponza would all be instances of the same Material. If we sort and bucket the mesh list according to Materials, we only need to bind the full render state for each material once. (This is also a convenient point to eliminate culled meshes, removing the visibility check in the main loop.)

At least for now, I’ve done this in the most naive way possible: the list of uniforms moves out of Material and into a new MaterialInstance struct, and each instance contains a pointer to its parent Material. Done!
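
Roughly, the split looks like this (a sketch of the shape, with Uniform hoisted out of Material; not the exact listing):

struct Material {
    GLuint shader_program;
    GLenum depth_mask;
    GLenum depth_func;
};

struct MaterialInstance {
    // The parent material supplying the render states above.
    Material * material;
    // Same Uniform struct as before, just owned by the instance now.
    std::vector<Uniform> uniforms;

    // Uniform setup methods elided, as before.
};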

This is not a great solution; there are a lot of ways to shoot yourself in the foot. For example, a MaterialInstance that doesn’t contain the full set of expected uniforms will render with stale data, while one that contains extras will assert when the uniform bind fails. The Material should probably have a “base instance” that defines which uniforms are required, along with defaults for them; each instance would then be validated against this base. I have not implemented this yet.

Here’s where we end up with MaterialInstance:

BucketedRenderList render_list = MakeBucketedRenderList(meshes, mesh_count, visible_meshes);

for (RenderBucket bucket : render_list.buckets) {
    main_pass.Begin();
    main_pass.SetRenderTarget(GL_FRAMEBUFFER, &hdr_buffer_rt_ms);

    main_pass.BindMaterial(bucket.material);

    // Additional global setup, as above.

    for (u32 i = bucket.start; i < bucket.end; ++i) {
        assert(i < render_list.mesh_count);
        RenderMesh * rmesh = &render_list.mesh_list[i];
        IndexedMesh * mesh = rmesh->mesh;
        MaterialInstance * material_instance = rmesh->material_instance;

        RenderPass main_subpass = main_pass.BeginSubPass();

        main_subpass.BindMaterialInstance(material_instance);
        main_subpass.SetUniformMatrix44("world_from_local", rmesh->world_from_local);
        mesh->SetAttribPointersForShader(main_subpass.active_shader);
        mesh->Draw();

        main_pass.EndSubPass(main_subpass);
    }

    main_pass.End();
}

(Material binding has been moved into RenderPass, which is in hindsight a more natural place for it to live.)

By carving the mesh list up into buckets by Material, we only need to do the global setup once for each bucket, and then only set the specific instance materials per-mesh. Even better, the check for culled objects disappears. And this list is reusable between the depth prepass and main render pass.
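
MakeBucketedRenderList itself doesn’t need to be clever. A sketch of the idea (it assumes mesh_list is a plain array and that RenderBucket stores a Material pointer plus a [start, end) range; hikari’s version differs in the details):

// Sketch: copy visible meshes, sort them so meshes sharing a Material are
// adjacent, then record one bucket per run of identical Materials.
// (std::sort comes from <algorithm>.)
BucketedRenderList
MakeBucketedRenderList(RenderMesh * meshes, u32 mesh_count, bool * visible_meshes) {
    BucketedRenderList list = {};
    list.mesh_list = new RenderMesh[mesh_count];
    for (u32 i = 0; i < mesh_count; ++i) {
        if (visible_meshes[i]) {
            list.mesh_list[list.mesh_count++] = meshes[i];
        }
    }

    std::sort(list.mesh_list, list.mesh_list + list.mesh_count,
        [](const RenderMesh & a, const RenderMesh & b) {
            return a.material_instance->material < b.material_instance->material;
        });

    u32 start = 0;
    for (u32 i = 1; i <= list.mesh_count; ++i) {
        if (i == list.mesh_count ||
            list.mesh_list[i].material_instance->material !=
                list.mesh_list[start].material_instance->material) {
            RenderBucket bucket = { list.mesh_list[start].material_instance->material, start, i };
            list.buckets.push_back(bucket);
            start = i;
        }
    }
    return list;
}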

Still a lot of room for performance improvement: the RenderMesh struct causes a lot of unnecessary pointer chasing, meshes using the same instance of a material could be batched as well, there are *sprintf* calls in the outer loop. It’s pretty clear I need to spend a lot more time here.

However, this is progress! More importantly, Materials are now general enough I can implement the terrain materials. So that’s next.


hikari – Designing a Terrain System

The next big addition to hikari is going to be a system for rendering terrain. I’ve done a few weeks of research and wanted to write up my plans before implementing. Then, once it’s complete, I can review what did and did not work.

That’s the idea, anyway.

Source Format:

I decided to use simple heightmap-based terrain. Starting with a regular grid of points, offset each vertically based on a grayscale value stored in an image. This is not the only (or best) choice, but it is the simplest to implement as a programmer with no art team at hand. Other options include marching-cubes voxels, constructive solid geometry, and hand-authored meshes.

However, I’m consciously trying to avoid depending on the specific source of the mesh in the rest of the pipeline. This would allow any of (or even a mix of) these sources to work interchangeably. We’ll see how realistic that expectation is in practice.

Chunking:

Terrain meshes can be quite large, especially in the case of an open-world game where the map may cover tens of kilometers in any direction, while still maintaining detail up close. Working with such a large mesh is unwieldy, so let’s not.

Instead, we cut the terrain up into chunks. For example, if our world map is 4km x 4km, we could cut it up into 1600 separate 100m x 100m chunks. Now we don’t need to keep the entire map in memory, only the chunks that we might need to render in the near future (e.g., chunks around the camera).

There are additional benefits to chunking the map; for example, the ability to frustum cull the chunks. Chunks provide a convenient granularity for applying level-of-detail optimizations as well.
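
To make the chunking concrete, here’s the arithmetic for the example above (a sketch with hypothetical names; hikari’s terrain code does not exist yet):

// Sketch: a 4km x 4km world cut into 100m chunks is a 40 x 40 grid (1600 chunks).
static const float CHUNK_SIZE = 100.0f;

struct ChunkCoord {
    int x;
    int z;
};

ChunkCoord
ChunkFromWorldPosition(vec3 world_position) {
    ChunkCoord result;
    // floorf (not truncation) so negative coordinates land in the correct chunk.
    result.x = (int)floorf(world_position.x / CHUNK_SIZE);
    result.z = (int)floorf(world_position.z / CHUNK_SIZE);
    return result;
}

// Chunks to keep resident: everything within some radius (in chunks) of the
// camera's own chunk, e.g. a simple double loop over [cam.x - r, cam.x + r]
// and [cam.z - r, cam.z + r].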

Materials:

The mesh is only part of our terrain, of course. It needs texture and color from materials: grass, rock, snow, etc. Importantly, we don’t necessarily want to texture every part of the terrain with the same material. One solution would be to assign a single material to each chunk, and have an artist paint a unique texture.

This seems like a non-starter to me: potentially thousands of high-resolution textures would need to be created and stored, not to mention streamed into and out of memory alongside their mesh chunks. In addition, producing a distortion-free UV mapping for terrain is difficult. Finally, while we don’t want to texture everything the same way, there is only a limited set of materials we want to use, and it would be nice to reuse the same textures.

Enter splat maps. The basic idea is straightforward: assign a color to the terrain. The components of that color determine the blend weights of a set of materials. If the red channel weights a rock texture and the green channel weights a grass texture, ‘yellow’ terrain is a blend between rock and grass. This allows us to smoothly blend between 5 different materials.

Wait, 5? How?

A color has four components: red, green, blue, and alpha. However, if we assume the material weights must sum to 1.0, we can construct a new weight that is (1.0 - (r + g + b + a)). This becomes the weight of our 5th material. (Of course, we need to actually enforce these constraints in art production, but that is a separate issue.)
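
In the shader, recovering the five weights and blending is only a few lines. A GLSL sketch (splat, uv, and albedo_maps are hypothetical names, and a full version would blend normals, roughness, etc. the same way):

// Assumed inputs: `in vec4 splat;` (the interpolated splat color), a vec2 `uv`,
// and `uniform sampler2D albedo_maps[5];`
vec4 w = splat;
float w4 = max(1.0 - (w.r + w.g + w.b + w.a), 0.0);  // implicit 5th weight

vec3 albedo = w.r * texture(albedo_maps[0], uv).rgb
            + w.g * texture(albedo_maps[1], uv).rgb
            + w.b * texture(albedo_maps[2], uv).rgb
            + w.a * texture(albedo_maps[3], uv).rgb
            + w4  * texture(albedo_maps[4], uv).rgb;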

Furthermore, since chunks of the terrain get rendered in their own draw call, there’s no reason we can’t swap the materials used in each chunk. So while the number of materials in a specific chunk is limited, the total number of materials is unlimited (well, close enough).

The name “splat map” may imply using a texture to assign this color (and, for heightmap terrain, that’s the easiest way to store it on disk), but I actually want to use vertex colors. This puts heightmaps and authored meshes on the same footing, and keeps decisions about the mesh source in the mesh loading code, where they belong.

In order to do texturing, we need texture coordinates. The obvious way to do this is to assign a UV grid to our heightmap mesh. This will look fine for smooth, subtle terrain features, but the steeper the slope, the more distorted the texturing becomes.

Instead, we can use so-called triplanar mapping to generate texture coordinates. Project the world position to each of the XY, YZ, and ZX planes, and use that value as the texture coordinate for three different lookups. Then, blend between them based on the vertex normal.

As an added bonus, there’s no requirement that the various axes in triplanar mapping use the same material. By mixing materials, we can create effects such as smooth grass that fades to patches of dirt on slopes.
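
A GLSL sketch of the basic triplanar lookup, before any splat blending (the names are mine; the per-axis material mixing mentioned above just means sampling different textures for the three planes):

// Assumed inputs: vec3 `world_position`, vec3 `world_normal`,
// and `uniform sampler2D albedo_map;`
vec3 blend = abs(normalize(world_normal));
blend /= (blend.x + blend.y + blend.z);   // make the three weights sum to 1

vec3 albedo_x = texture(albedo_map, world_position.yz).rgb;  // projection along X
vec3 albedo_y = texture(albedo_map, world_position.zx).rgb;  // projection along Y
vec3 albedo_z = texture(albedo_map, world_position.xy).rgb;  // projection along Z

vec3 albedo = albedo_x * blend.x + albedo_y * blend.y + albedo_z * blend.z;

// In practice a tiling scale on world_position is also wanted.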

Of course, there is a cost to all of this. Triplanar mapping requires doing 3x the material samples. Splat mapping requires doing up to 5x the samples. Together, it’s up to 15x the material samples, and we haven’t even begun to talk about the number of individual textures in each material. Needless to say, rendering terrain in this fashion is going to read a *lot* of texture samples per-pixel.

That said, hikari already uses a depth prepass to avoid wasted shading on opaque geometry, so I suspect that would mitigate the worst of the cost. In addition, it’s easy enough to write another shader for more distant chunks that only uses the 2-3 highest weights out of the splat where that’s likely to be an unnoticeable difference.

Plumbing:

There are a bunch of assumptions that hikari makes regarding meshes and materials that are a bit at odds with what I’ve mentioned above. The renderer is simple enough that I can work around these assumptions for special cases. However, I will need to be on the lookout for opportunities to make some of the rendering systems (or, at least, the exposed API) more general.

Some examples: at the moment, meshes are assumed to use a specific common vertex format (position, UV, normal, tangent) — terrain meshes will not (no need for UV and tangent, but we need the splat color.) Meshes are expected to use only one material, and materials do not pack any texture maps together (so, for example, baked AO and roughness are separate maps). Neither of these will play nicely with terrain, but should be simple enough to fix. “Materials” are currently just a bag of textures and a handful of uniform parameters, since at the moment there are only two mesh shaders in the engine. A more generic material model that ties directly into its associated shader would be a useful thing to fix.

So, there’s the plan of attack. We’ll see how well it stacks up. I’ll be documenting the implementation of each part of the system in a series of blog posts, capped off by a final report on what worked and where I got it totally wrong.

hikari – Implementing Single-Scattering

Over the past few days, I’ve been implementing a single-scattering volumetric fog effect in hikari, my OpenGL PBR renderer.

I started by more-or-less copying Alexandre Pestana’s implementation (http://www.alexandre-pestana.com/volumetric-lights/), porting it to GLSL and fitting it into my existing render pipeline.

However, there’s an issue: by not taking into account the transmittance along the ray, instead taking an average of all scattered samples, the effect is far more severe than it should be. In addition, it blends really poorly with the scene (using additive blending after the lighting pass.)

Bart Wronski describes a more physically-correct model in his presentation on AC4’s volumetric fog (https://bartwronski.com/publications/). I did not go the full route of using a voxel grid (although I may in the future; this renderer uses clustered shading so that’s a natural extension). However, Wronski presents the correct integration to account for transmittance, and we can apply that to the post-effect approach as well.

Background out of the way, I wanted an implementation that was:
– Minimally invasive. If possible, as simple as adding a new render pass and blend. No compute shaders or large data structures.
– Tweakable. Light dust to thick fog.
– Plausible. I don’t need absolute physical accuracy, but something that fits well in the scene and looks “right”.

The core loop is (relatively) simple:

// Accumulated in-scatter.
vec3 inscatter_color = vec3(0.0);
// Accumulated density.
float total_density = 0.0;
for (int i = 0; i < STEP_COUNT; ++i) {
    // Sample sun shadow map.
    vec3 sun_clip_position = GetClipSpacePosition(current_position, sun_clip_from_view);
    float shadow_factor = SampleShadowPoint(sun_shadow_map, sun_clip_position.xy, sun_clip_position.z);

    // Calculate total density over step.
    float step_density = density * step_distance;

    // Calculate transmittance of this sample (based on total density accumulated so far).
    float transmittance = min(exp(-total_density), 1.0);

    // Sun light scatter.
    inscatter_color += sun_color * shadow_factor * sun_scatter_amount * step_density * transmittance;

    // Ambient scatter.
    inscatter_color += ambient_light_color * ambient_scatter_amount * step_density * transmittance;

    // Accumulate density.
    total_density += step_density;

    // Step forward.
    current_position += step_vector;
}

The raymarch operates in view space. This was mostly a matter of convenience, but also means we maintain better precision close to the camera, even if we happen to be far away from the world origin.
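
For context, the setup before the loop looks roughly like this (a sketch; depth_texture, view_from_clip, and jitter are my names, not necessarily hikari’s):

// Assumed inputs: vec2 `uv` (screen-space UV), `uniform sampler2D depth_texture;`,
// `uniform mat4 view_from_clip;` (inverse projection), and STEP_COUNT as above.
// Build a view-space ray from the camera (at the origin) to the scene depth
// at this pixel, then split it into STEP_COUNT segments.
float device_z = texture(depth_texture, uv).r * 2.0 - 1.0;
vec4 clip_position = vec4(uv * 2.0 - 1.0, device_z, 1.0);
vec4 view_position = view_from_clip * clip_position;
view_position /= view_position.w;

vec3 ray = view_position.xyz;
float step_distance = length(ray) / float(STEP_COUNT);
vec3 step_vector = ray / float(STEP_COUNT);

// Offset the start point by a fraction of a step to break up banding (see below).
vec3 current_position = step_vector * jitter;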

Currently, I only compute scattering for sunlight and a constant ambient term (treated as coming from directly up). If you have ambient probes stored as spherical harmonics, you can compute a much better ambient term — see Wronski’s slides for details. Other light types require recomputing the scatter amount per-sample, as it depends on the angle between the light and the view.

Note the scaling by transmittance. The light scattered by a sample has to travel through all of the particles we’ve seen to this point, and therefore it is attenuated by the scattering and absorption.

Density is (very roughly) a measure of how much scattering material a ray passes through per unit length. (I don’t 100% understand the physical basis here, but it depends on both particle size and number.) The implementation currently uses a completely uniform density value, but this could easily be sampled from a texture map (artist painted, noise, particle system, etc.) In practice, good-looking density values tend to be very low:

[Image: density = 0.00015]

[Image: density = 0.00025]

[Image: density = 0.0025]

[Image: density = 0.005]
(g = -0.85)

You can smooth the results of higher densities by using more samples (or blurring the result.)

The other control parameter for the scattering is g. This is used in the phase function to compute the amount of light scattered towards the viewer. The valid range for g is [-1, 1]. In general, most atmospheric particles will have a negative g value, one whose magnitude increases as the particle size increases. Some good values to start with are between -0.75 and -0.99.
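
The post doesn’t spell out the phase function itself; one common choice for this kind of effect is Henyey-Greenstein. A GLSL sketch:

// Henyey-Greenstein phase function.
// cos_theta is the cosine of the angle between the view ray and the light
// direction; the sign convention used there determines whether forward
// scattering corresponds to positive or negative g.
float PhaseHG(float cos_theta, float g) {
    float g2 = g * g;
    float denom = 1.0 + g2 - 2.0 * g * cos_theta;
    return (1.0 - g2) / (4.0 * 3.14159265 * denom * sqrt(denom));
}

// For the sun the angle is constant along the ray, so sun_scatter_amount in the
// loop above can be computed once, outside the march.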

[Image: g = -0.75]

[Image: g = -0.95]
(density = 0.00025)

At low sample counts, the result is highly susceptible to banding artifacts. To combat this, we jitter the starting position by moving some fraction of a step in the direction of the ray. (This is the same approach used by Pestana.) In order to keep the noise unobjectionable, we need to use either an even pattern like a Bayer matrix or blue noise. I’ve also experimented with dithering the step distance, so that neighboring pixels use different step sizes. However, this does not seem to produce much benefit over just jittering the start position, and the resulting noise is more noticeable.
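
For reference, the Bayer jitter itself is tiny. A sketch (the matrix values are the standard 4x4 ordered-dither pattern; indexing by gl_FragCoord is an assumption about how it’s wired up):

// 4x4 Bayer matrix, remapped to [0, 1). Neighboring pixels get different
// offsets along the ray, turning banding into high-frequency noise.
const float bayer[16] = float[16](
     0.0,  8.0,  2.0, 10.0,
    12.0,  4.0, 14.0,  6.0,
     3.0, 11.0,  1.0,  9.0,
    15.0,  7.0, 13.0,  5.0);

ivec2 pixel = ivec2(gl_FragCoord.xy) % 4;
float jitter = bayer[pixel.y * 4 + pixel.x] / 16.0;

// current_position += step_vector * jitter;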

[Image: no jitter]
[Image: jitter]
(density = 0.0025, g = -0.85)

The outputs of this shader are the accumulated scattered light (in RGB) and the final transmittance amount (in alpha). I then perform a bilateral upscale (directly ported from Pestana’s HLSL version) and blend it with the lighting buffer using glBlendFunc(GL_ONE, GL_SRC_ALPHA). The transmittance is the amount of light that reaches the viewer from the far end of the ray, so this is accurate.

For higher densities or more uniform scattering (g closer to 0), we may want to perform a depth-aware blur on the scatter buffer before upscaling. Since I didn’t have an immediate need for really thick scatter, I have not implemented this yet.

The scattering should be added before doing the luminance calculation and bloom. This helps to (somewhat) mitigate the darkening effect of unlit areas, as well as further smoothing the edges of light shafts.

There’s still a lot to be done here: supporting additional lights, variable density, etc. But even the basic implementation adds a lot when used subtly.

Orthographic LODs – Part II

Last time, we got the basic renderer up and running. Now we have more complex issues to deal with. For example, how to light the LOD models.

Lighting

One approach to lighting is to simply light the detailed model, and bake that lighting into the LOD texture. This works, but requires that all lights be static. We can do better.

The standard shading equation has three parts: ambient light, diffuse light, and specular light:
K_a = AT
K_d = DT (N \cdot L)
K_s = S (N \cdot H)^m   (when N \cdot L > 0)
(For details of how these equations work, see Wikipedia.)

The key thing to notice is that these equations rely only on the texture (which we already have), a unit direction towards the light source, a unit direction towards the camera, and the unit surface normal. By rendering our detailed model down to a texture, we’ve destroyed the normal data. But that’s easy enough to fix: we write it to a texture as well.

(Setup for this texture is more-or-less the same as the color texture, we just bind it to a different output.)

// Detail vertex shader
// (eye_from_model is the same orthographic view transform used for the LOD render.)
uniform mat4 eye_from_model;

in vec3 position;
in vec3 normal;

out vec3 Normal;

void main() {
    gl_Position = eye_from_model * vec4(position, 1.0);
    Normal = normal;
}

// Detail fragment shader

in vec3 Normal;

out vec4 frag_color;
out vec4 normal_map_color;

void main() {
    vec3 normal = (normalize(Normal) + vec3(1.0)) / 2.0;

    frag_color = vec4(1.0);
    normal_map_color = vec4(normal, 1.0);
}

A unit normal is simply a vector of 3 floats between -1 and 1. We can map this to an RGB color (vec3 clamped to [0, 1]) by adding one and dividing by two. This is, again, nothing new. You may be wondering why we store an alpha channel — we’ll get to that later.

We can now reconstruct the normal from the LOD texture. This is all we need for directional lights:

// Fragment shader
uniform sampler2D color_texture;
uniform sampler2D normal_map_texture;

uniform vec3 sun_direction;
uniform vec3 camera_direction;

in vec2 uv;

out vec4 frag_color;

void main() {
    vec4 normal_sample = texture(normal_map_texture, uv);
    vec3 normal = (normal_sample.xyz * 2.0) - vec3(1.0);
    vec3 view = -1.0 * camera_direction;
    vec3 view_half = normalize(sun_direction + view);

    float n_dot_l = dot(normal, sun_direction);
    float n_dot_h = dot(normal, view_half);

    float ambient_intensity = 0.25;
    float diffuse_intensity = 0.5 * max(n_dot_l, 0.0);
    float specular_intensity = 0.0;
    if (n_dot_l > 0) {
        specular_intensity = 0.25 * pow(max(n_dot_h, 0.0), 10.0);
    }

    vec4 texture_sample = texture(color_texture, uv);

    vec3 lit_color = ambient_intensity * texture_sample.rgb +
                     diffuse_intensity * texture_sample.rgb +
                     specular_intensity * vec3(1.0);

    frag_color = vec4(lit_color, texture_sample.a);
}

Result:
[Image: lod_test_06]

Point / Spot Lighting

So this solves directional lights. However, point and spot lights have attenuation — they fall off over distance. Additionally, they have an actual spatial position, which means the light vector will change.

After rendering the LODs, we no longer have the vertex positions of our detail model. Calculating lighting based on the LOD model’s vertices will, in many cases, be obviously wrong. How can we reconstruct the original points?

By cheating.

Let’s look at what we have to work with:

The camera is orthographic, which means that the depth of a pixel is independent of where it is on screen. We have the camera’s forward vector (camera_direction in the above shader).

Finally, and most obviously, we don’t care about shading the points that weren’t rendered to our LOD texture.

This turns out to be the important factor. If we knew how far each pixel was from the camera when rendered, we could reconstruct the original location for lighting.

To put it another way, we need a depth buffer. Remember that normal map alpha value we didn’t use?

We can get the depth value like so:

// Map depth from [near, far] to [-1, 1]
float depth = (2.0 * gl_FragCoord.z - gl_DepthRange.near - gl_DepthRange.far) / (gl_DepthRange.far - gl_DepthRange.near);

// Remap normal and depth from [-1, 1] to [0, 1]
normal_depth_color = (vec4(normalize(normal_vec), depth) + vec4(1.0)) / 2.0;

However, we now run into a problem. This depth value is in the LOD render’s window space. We want the position in the final render’s camera space — this way we can transform the light position to the same space, which allows us to do the attenuation calculations.

There are a few ways to handle this. One is to record the depth range for our LOD model after rendering it, and then use that to scale the depth properly. This is complex (asymmetrical models can have different depth sizes for each rotation) and quite likely inconsistent, due to precision errors.

A simpler solution, then, is to pick an arbitrary depth range, and use it consistently for both the main render and LODs. This is not ideal — one getting out of sync with the other may lead to subtle errors. In a production design, it may be a good idea to record the camera matrix used in the model data, so that it can be confirmed on load.

We need two further pieces of data to proceed: the viewport coordinates (the size of the window) and the inverse projection matrix (to “unproject” the position back into camera space). Both of these are fairly trivial to provide. To reconstruct the proper position, then:

vec3 window_to_camera_space(vec3 window_space) {
    // viewport = vec4(x, y, width, height)

    // Because projection is orthographic, NDC == clip space
    vec2 clip_xy = ((2.0 * window_space.xy) - (2.0 * viewport.xy)) / (viewport.zw) - 1.0;
    // Already mapped to [-1, 1] by the same transform that extracts our normal.
    float clip_z = window_space.z;

    vec4 clip_space = vec4(clip_xy, clip_z, 1.0);

    vec4 camera_space = camera_from_clip * clip_space;
    return camera_space.xyz;
}

(Note that this is rather inefficient. We *know* what a projection matrix looks like, so that last multiply can be made a lot faster.)
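
For instance, since the inverse of an orthographic projection is just a per-axis scale and offset, the multiply could collapse to a single multiply-add. A sketch (unproject_scale and unproject_offset are hypothetical uniforms, pulled from the diagonal and translation column of camera_from_clip):

// Sketch: orthographic "unproject" without the full matrix multiply.
// unproject_scale  = vec3(camera_from_clip[0][0], camera_from_clip[1][1], camera_from_clip[2][2])
// unproject_offset = camera_from_clip[3].xyz
vec3 camera_space = clip_space.xyz * unproject_scale + unproject_offset;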

Calculating the light color is the same as for directional lights. For point lights, we then divide by an attenuation factor:

float attenuation = point_light_attenuation.x +
                    point_light_attenuation.y * light_distance +
                    point_light_attenuation.z * light_distance * light_distance;

point_light_color = point_light_color / attenuation;

This uses 3 factors: constant, linear, and quadratic. The constant factor determines the base intensity of the light (a fractional constant brightens the light; a constant greater than 1.0 darkens it). The linear and quadratic factors control the fall-off curve.

A spot light has the same attenuation factors, but also has another term:

float spot_intensity = pow(max(dot(-1.0 * spot_direction, light_direction), 0.0), spot_exponent);

The dot product here limits the light to a cone around the spotlight’s facing direction. The exponential factor controls the “tightness” of the light cone — as the exponent increases, the fall-off becomes much sharper (remember that the dot product of unit vectors produces a value in [0, 1]).

So that covers lighting. Next time: exploring options for shadows.

[Image: lod_test_09]

NOTE:

I made some errors with terminology in my last post. After the projection transform is applied, vertices are in *clip* space, not eye space. The series of transforms is as follows:

Model -> World -> Camera -> Clip -> NDC
(With an orthographic projection, Clip == NDC.)

Orthographic LODs – Part I

NOTE: I’m going to try something new here: rather than write about something I have already solved, this series will be my notes as I work through this problem. As such, this is *not* a guide. The code is a pile of hacks and I will make silly mistakes.

The Problem:

I am working on a city-building game. I want to be able to use detailed models with large numbers of polygons and textures, but without the runtime rendering cost that entails. To do so, I want to pre-render detailed models as textures for lower level-of-detail (LOD) models. This is not a particularly original solution (it is the system SimCity 4 uses). There are a few restrictions implied by this choice:

  • Camera projection must be orthographic.
    • A perspective projection will appear to skew the model the further it gets from the center of the view. Orthographic projection will not.
  • Camera angle must be fixed (or a finite set of fixed positions).
    • While camera *position* is irrelevant (with an orthographic projection, moving the camera only shifts the image; it doesn’t change the model’s appearance), its rotation is not. Each camera angle needs its own LOD image, so we want to keep the number of angles small.

The process is simple enough: render the detail model for each view. Projecting the vertices of the LOD model with the same view produces the proper texture coordinates for the LOD. Then, pack all of the rendered textures into an atlas.
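
Generating the LOD texture coordinates is then just a loop over the LOD vertices using the same transform. A sketch (eye_from_model is the combined orthographic transform built later in this post; lod_uvs is a hypothetical output array):

// Sketch: project LOD vertices with the orthographic view to get their UVs.
for (u32 i = 0; i < lod_model->vertex_count; ++i) {
    vec4 clip_vert = eye_from_model * lod_model->vertices[i];
    // Orthographic projection: no perspective divide, xy already in [-1, 1].
    lod_uvs[i] = vec2(clip_vert.x * 0.5f + 0.5f, clip_vert.y * 0.5f + 0.5f);
}

// Packing multiple views into an atlas adds a per-view offset and scale on top of this.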

Orthographic Rendering

Rendering an orthographic projection is, conceptually, very straightforward. After transforming the world to camera space (moving the camera to the origin, and aligning the z-axis with the camera’s forward view), we select a box of space and map it onto a unit cube (that is, a cube with a minimum point of (-1, -1, -1) and a maximum point of (1, 1, 1).) The z-component of each point is then discarded.

I will not get into the math here. Wikipedia has a nicely concise explanation.

What this *means* is that we can fit this box tightly to the model we want to render. With clever application of glViewport, we could even render all of our views in one go. (At the moment, I have not yet implemented this.) This makes building the texture atlas much simpler.

Calculating Camera-space LOD

To get this tight fit, we need to know the bounds of the rendered model in camera space. Since we are projecting onto the LOD model, *its* bounds are what we’re concerned with. (This implies, by the way, that the LOD model must completely enclose the detail model.)

struct AABB {
    vec3 min;
    vec3 max;
};

AABB
GetCameraSpaceBounds(Model * model, mat4x4 camera_from_model) {
    AABB result;

    vec4 * model_verts = model->vertices;
    vec4 camera_vert = camera_from_model * model_verts[0];

    // We don't initialize to 0 vectors, because we don't
    // guarantee the model is centered on the origin.
    result.min = vec3(camera_vert);
    result.max = vec3(camera_vert);

    for (u32 i = 1; i < model->vertex_count; ++i) {
        camera_vert = camera_from_model * model_verts[i];

        result.min.x = min(result.min.x, camera_vert.x);
        result.min.y = min(result.min.y, camera_vert.y);
        result.min.z = min(result.min.z, camera_vert.z);

        result.max.x = max(result.max.x, camera_vert.x);
        result.max.y = max(result.max.y, camera_vert.y);
        result.max.z = max(result.max.z, camera_vert.z);
    }

    return result;
}

This bounding box gives us the clip volume to render.

AABB lod_bounds = GetCameraSpaceBounds(lod_model, camera_from_model);

mat4x4 eye_from_camera = OrthoProjectionMatrix(
    lod_bounds.max.x, lod_bounds.min.x,  // right, left
    lod_bounds.max.y, lod_bounds.min.y,  // top, bottom
    lod_bounds.min.z, lod_bounds.max.z); // near, far

mat4x4 eye_from_model = eye_from_camera * camera_from_model;

Rendering LOD Texture

Rendering to a texture requires a framebuffer. (In theory, we could also simply render to the screen and extract the result with glReadPixels. However, that limits us to a single color output and makes it more difficult to do any GPU postprocessing.)
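
The framebuffer setup itself is the usual OpenGL boilerplate; a sketch (the depth renderbuffer is my addition, so that the detail model depth-tests correctly while being rendered):

GLuint framebuffer;
glGenFramebuffers(1, &framebuffer);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);

// Depth attachment for depth testing during the detail render.
GLuint depth_buffer;
glGenRenderbuffers(1, &depth_buffer);
glBindRenderbuffer(GL_RENDERBUFFER, depth_buffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, render_width, render_height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depth_buffer);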

After binding a new framebuffer, we need to create the texture to render onto:

glGenTextures(1, &result->texture);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, result->texture);

// render_width and render_height are the width and height of the 
// camera space lod bounding box.
glTexImage2D(
    GL_TEXTURE_2D, 0, GL_RGBA, render_width, render_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0
);

glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, result->texture, 0);

glViewport(0, 0, render_width, render_height);

This gives us a texture large enough to fit our rendered image, attaches it to the framebuffer, and sets the viewport to the entire framebuffer. To render multiple views to one texture, we’d allocate a larger texture, and use the viewport call to select the target region.

Now we simply render the detail model with the eye_from_model transform calculated earlier. The result:

[Image: lod_test_02]
(The detail shader used simply maps position to color.)

That’s all for now. Next time: how do we light this model?