Uniform blocks are not structs! Members of an anonymous block (like Crazy80s below) live in the global namespace; a block with an instance name (like transforms) is accessed through that name:
uniform float Deformation;

uniform Crazy80s {
    float Madonna;
    int DuranDuran;
};

uniform Transform {
    mat4 ModelViewMatrix;
    float Scale;
} transforms[4];

...

float a = Deformation;            // ordinary uniform in the default block
float b = Madonna;                // anonymous block member: global scope, no prefix
float c = transforms[2].Scale;    // named block: accessed through the instance name
// Uniforms in the default block are set with glUniform*:
GLint loc = glGetUniformLocation(prog, "Deformation");
glUniform1f(loc, 3.14159f);

// Block uniforms are fed from buffer objects instead; query the block index.
// Each element of a block array gets its own index:
GLuint idx = glGetUniformBlockIndex(prog, "Transform[2]");
The std140 qualifier gives the block a layout that's fixed by the spec, so you can compute member offsets yourself:

layout(std140) uniform Crazy80s { float Madonna[2]; };
GLuint ubo;
glGenBuffers(1, &ubo);

// Choose a binding point; must be < GL_MAX_UNIFORM_BUFFER_BINDINGS
GLuint bp = 7;

// Bind the buffer to that binding point (this also binds it to the
// generic GL_UNIFORM_BUFFER target, so glBufferData below works on it)
glBindBufferBase(GL_UNIFORM_BUFFER, bp, ubo);

// Under std140, each element of a float array is padded out to 16 bytes,
// so Madonna[2] occupies 32 bytes, not 8:
GLfloat data[8] = { 3.142f, 0, 0, 0,
                    2.712f, 0, 0, 0 };
glBufferData(GL_UNIFORM_BUFFER, sizeof(data), data, GL_STATIC_DRAW);

// Query the shader for the block index of 'Crazy80s' and hook it up
GLuint idx = glGetUniformBlockIndex(prog, "Crazy80s");
glUniformBlockBinding(prog, idx, bp);
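
If you skip std140 and stick with the default packing, the layout is implementation-defined, so you'd query offsets rather than compute them. A minimal sketch for the Madonna array above:

const GLchar* names[] = { "Madonna[0]" };
GLuint indices[1];
GLint offset, arrayStride;
glGetUniformIndices(prog, 1, names, indices);
glGetActiveUniformsiv(prog, 1, indices, GL_UNIFORM_OFFSET, &offset);
glGetActiveUniformsiv(prog, 1, indices, GL_UNIFORM_ARRAY_STRIDE, &arrayStride);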
Don't use built-ins like gl_FragColor and gl_FragData; they're extinct! Provide custom names and types for your fragment shader outputs according to what's actually being stored in your FBO.
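
For example, a fragment shader writing a color and a normal to two FBO attachments might declare (the names here are illustrative):

layout(location = 0) out vec4 FragColor;
layout(location = 1) out vec3 SurfaceNormal;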
OpenGL lets you manipulate depth in your fragment shader. However, for best performance you might want to let OpenGL perform the depth test early by using the early_fragment_tests flag. You can also hint at how you're manipulating Z, e.g., depth_greater, which lets the implementation keep early depth testing enabled even though you're writing gl_FragDepth.
in vec4 gl_FragCoord;   // has a valid z value
out float gl_FragDepth;

layout(early_fragment_tests) in;
layout(depth_greater) out float gl_FragDepth;
Subroutines act like function pointers, allowing you to hot-swap pieces of a shader in and out.
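
Here's a minimal sketch; the ShadeModelType / ShadePhong / ShadeCartoon names are made up for illustration. In the fragment shader:

#version 400
in vec3 Normal;
out vec4 FragColor;

subroutine vec4 ShadeModelType(vec3 n);
subroutine uniform ShadeModelType Shade;

subroutine(ShadeModelType)
vec4 ShadePhong(vec3 n) { return vec4(vec3(max(n.z, 0.0)), 1.0); }

subroutine(ShadeModelType)
vec4 ShadeCartoon(vec3 n) { return n.z > 0.5 ? vec4(1.0) : vec4(vec3(0.25), 1.0); }

void main() {
    FragColor = Shade(normalize(Normal));
}

On the C side, select an implementation without relinking:

GLuint cartoon = glGetSubroutineIndex(prog, GL_FRAGMENT_SHADER, "ShadeCartoon");
glUniformSubroutinesuiv(GL_FRAGMENT_SHADER, 1, &cartoon);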
Separable programs also allow you to hot-swap shaders, but at a coarser granularity than subroutines: whole pipeline stages rather than individual functions.
static GLuint LoadPipeline(
    const char* vsSource,
    const char* gsSource,
    const char* fsSource)
{
    GLuint vsProgram = glCreateShaderProgramv(GL_VERTEX_SHADER, 1, &vsSource);
    GLuint gsProgram = glCreateShaderProgramv(GL_GEOMETRY_SHADER, 1, &gsSource);
    GLuint fsProgram = glCreateShaderProgramv(GL_FRAGMENT_SHADER, 1, &fsSource);

    GLuint pipeline;
    glGenProgramPipelines(1, &pipeline);
    glBindProgramPipeline(pipeline);
    glUseProgramStages(pipeline, GL_VERTEX_SHADER_BIT, vsProgram);
    glUseProgramStages(pipeline, GL_GEOMETRY_SHADER_BIT, gsProgram);
    glUseProgramStages(pipeline, GL_FRAGMENT_SHADER_BIT, fsProgram);

    // glUniform* calls now heed the "active" shader program
    // rather than the one set with glUseProgram:
    glActiveShaderProgram(pipeline, vsProgram);
    GLint fooLocation = glGetUniformLocation(vsProgram, "foo"); // assuming the vertex shader declares a float uniform 'foo'
    glUniform1f(fooLocation, 1.0f);

    return pipeline;
}
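
Swapping in a different stage later is then a one-liner (otherFsProgram being another fragment program created the same way):

glUseProgramStages(pipeline, GL_FRAGMENT_SHADER_BIT, otherFsProgram);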
Program binaries let you cache the result of compilation and avoid recompiling shaders on subsequent runs:
#include <vector>

glProgramParameteri(programHandle, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE);
glLinkProgram(programHandle);

// Retrieve the compiled binary (e.g., to write it to disk):
GLint bufSize;
glGetProgramiv(programHandle, GL_PROGRAM_BINARY_LENGTH, &bufSize);
std::vector<char> buffer(bufSize);
GLenum binaryFormat;
glGetProgramBinary(programHandle, bufSize, NULL, &binaryFormat, &buffer[0]);

// Use a cached program on subsequent runs:
glProgramBinary(programHandle, binaryFormat, &buffer[0], bufSize);
Desktop OpenGL inherited this feature from OpenGL ES. Beware, however: the binary format isn't portable at all, and a cached binary can even be invalidated by a driver update. My personal preference is to avoid this feature unless I desperately need it.
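
If you do use it, check the link status after loading the binary and fall back to compiling from source when the driver rejects it:

GLint success;
glProgramBinary(programHandle, binaryFormat, &buffer[0], bufSize);
glGetProgramiv(programHandle, GL_LINK_STATUS, &success);
if (!success) {
    // The cached binary was rejected (e.g., after a driver update);
    // recompile and relink from source instead.
}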