Anatomy of a Block

Interface blocks look like structs, but they are not structs — a block name does not create a type you can instantiate elsewhere.

-- Vertex Shader

out MyBlock {
    vec3 Position;
    vec3 Color[2];
    float Opacity;
} Out;

-- Geometry Shader

in MyBlock {
    vec3 Position;
    vec3 Color[2];
    float Opacity;
} In[];
-- Vertex Shader

// Built-ins:
out gl_PerVertex {
    vec4 gl_Position;
    float gl_PointSize;
    float gl_ClipDistance[];
};

// User-defined:
in MyBlock {
    float w; // glGetAttribLocation(program, "MyBlock.w");
} In;

// Emit a fixed clip-space position; only the w component comes from the
// user-defined input block instance (In.w).
void main()
{
    gl_Position = vec4(1, 0, 0, In.w);
}
Uniform Blocks
TRIM6
      uniform float Deformation;
      
      uniform Crazy80s {
          float Madonna;
          int DuranDuran;
      };
      
      uniform Transform {
          mat4 ModelViewMatrix;
          float Scale;
      } transforms[4];
      
      ...
      
      float a = Deformation;
      float b = Madonna;
      float c = transforms[2].Scale;
  

TRIM6
      GLuint loc = glGetUniformLocation(prog, "Deformation");
      glUniform1f(loc, 3.14159f);
      
      GLuint idx = glGetUniformBlockIndex(prog, "Transform[2]");
  
Uniform Buffers
UBO handle (aka name)
passed to glBindBufferBase to affect subsequent glBufferData, glMapBuffer, etc
block index
queried from the shader via glGetUniformBlockIndex
binding point
passed to glBindBufferBase to attach the UBO to one of the context's indexed uniform-buffer binding points
passed to glUniformBlockBinding to "link" the UBO to the uniform block
note: this can now be specified in GLSL using a layout qualifier (e.g., layout(binding = N)) rather than calling glUniformBlockBinding
TRIM6
      layout(std140) uniform Crazy80s { float Madonna[2]; };
  

TRIM6
      GLuint ubo;
      glGenBuffers(1, &ubo);
      
      // Choose an indexed binding point; must be < GL_MAX_UNIFORM_BUFFER_BINDINGS
      GLuint bp = 7;
      
      // Attach the UBO to the indexed binding point, then fill it with data
      glBindBufferBase(GL_UNIFORM_BUFFER, bp, ubo);
      float data[2] = { 3.142f, 2.712f }; // fixed: missing semicolon
      glBufferData(GL_UNIFORM_BUFFER, sizeof(data), data, GL_STATIC_DRAW);
      
      // Query the shader for block index of 'Crazy80s' and hook it up
      GLuint idx = glGetUniformBlockIndex(prog, "Crazy80s");
      glUniformBlockBinding(prog, idx, bp);
  

Binding Vertex Attributes
// Worst: let the compiler decide
GLuint foo = glGetAttribLocation(program, "MyBlock.w");              
// Better: Specify in application code
GLuint foo = 3;

glCompileShader(vsHandle);
glAttachShader(programHandle, vsHandle);
glBindAttribLocation(programHandle, foo, "MyBlock.w");
glLinkProgram(programHandle);              
// Best: Declare in GLSL
in MyBlock {
    layout(location = 3) vec3 w;
}; // fixed: an interface block declaration requires a trailing semicolon
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(foo, 1, GL_FLOAT, GL_FALSE, stride, 0);
glEnableVertexAttribArray(foo);

Don't use these built-ins; they're extinct! Provide custom names & types for your fragment shader outputs according to what's actually being stored in your FBO.


vec4 gl_FragColor
vec4 gl_FragData[n]

extinct

Binding Fragment Outputs
// Let the compiler decide (not recommended)
GLuint colorNumber = glGetFragDataLocation(program, "MyColorVariable");
// Specify in application code
GLuint colorNumber = 3;
glBindFragDataLocation(programHandle, colorNumber, "MyColorVariable");
// Declare in GLSL
layout(location = 3) out vec4 factor;
 // Beware, a level of indirection!
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, myFbo);

GLenum buffers[] = {GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1};
glDrawBuffers(2, &buffers[0]);

OpenGL lets you manipulate depth in your fragment shader. However, for best performance you might want to let OpenGL perform depth testing earlier by using the early_fragment_tests flag. You can also give it hints about how you're manipulating Z, e.g., depth_greater.


in vec4 gl_FragCoord; // has a valid z value

out float gl_FragDepth;
layout(early_fragment_tests) in;

layout (depth_greater) out float gl_FragDepth;
Subroutines

Subroutines act like function pointers, allowing you to hot-swap pieces of shader in and out.

-- Vertex Shader

subroutine vec3 IlluminationFunc(vec3 N, vec3 L);

subroutine(IlluminationFunc)
vec3 diffuse(vec3 N, vec3 L)
{
    // Clamped Lambert term, splatted to vec3 to match the subroutine's
    // declared return type. (Original returned a float from a vec3
    // function, and max(0, ...) mixed int and float -- both GLSL errors.)
    return vec3(max(0.0, dot(N, L)));
}

subroutine(IlluminationFunc)
vec3 specular(vec3 N, vec3 L)
{
    vec3 E = vec3(0, 0, 1); // view direction (eye space)
    vec3 H = normalize(L + E); // Blinn half-vector
    // Clamp N.H before pow: GLSL pow(x, y) is undefined for x < 0.
    // Splat the scalar to vec3 to match the declared return type.
    // NOTE(review): Shininess is declared AFTER this function in the
    // original listing; GLSL requires declaration before use -- the
    // uniform should be moved above the subroutine definitions.
    return vec3(pow(max(dot(N, H), 0.0), Shininess));
}

uniform float Shininess = 1.0;
subroutine uniform IlluminationFunc IlluminationVar;

out vec4 vColor;
// Shade a hard-coded normal/position pair using whichever implementation
// is currently plugged into the IlluminationVar subroutine uniform
// (diffuse or specular, selected by the application).
void main()
{
    vec3 n = vec3(0, 0, 1);
    vec3 p = vec3(3, 1, 4);
    vec3 c = IlluminationVar(n, p);
    vColor = vec4(c, 1);
}

-- Geometry Shader

// normal uniforms are scoped to the program object:
uniform float Shininess = 1.0;

// subroutines are scoped to the shader stage:
subroutine vec3 IlluminationFunc(float foo);
subroutine uniform IlluminationFunc IlluminationVar;
GLuint prog;
// NOTE(review): glGetIntegerv takes a GLint*; passing a GLuint lvalue is
// shown uncast here for brevity -- real code should use GLint or a cast.
glGetIntegerv(GL_CURRENT_PROGRAM, &prog);

GLenum vs = GL_VERTEX_SHADER;

// Subroutine uniforms live in their own per-stage location namespace.
GLuint illum = glGetSubroutineUniformLocation(prog, vs,
                                              "IlluminationVar");

// Indices name the concrete functions that can be plugged in.
GLuint diffuse = glGetSubroutineIndex(prog, vs, "diffuse");
GLuint specular = glGetSubroutineIndex(prog, vs, "specular");

// This sets per-context state:
// (the count must cover every active subroutine uniform location in the stage)
GLuint indices[MAX_SUBROUTINE_VARIABLES];
indices[illum] = diffuse;
glUniformSubroutinesuiv(GL_VERTEX_SHADER, 1, indices);

// This sets per-program state:
GLuint shiny = glGetUniformLocation(prog, "Shininess");
glUniform1f(shiny, 1.0f); // fixed: glUniform1f takes (location, value) -- it has no program parameter

Separable programs also allow you to hot-swap shaders, but at a higher level of granularity than subroutines.

// Builds a program pipeline from three separable shader stages.
// Each source string is compiled AND linked into its own single-stage
// program object via glCreateShaderProgramv, then the three programs
// are attached to one pipeline object.
static GLuint LoadPipeline(
        const char* vsSource,
        const char* gsSource,
        const char* fsSource)
{
    const GLuint stages[3] = {
        glCreateShaderProgramv(GL_VERTEX_SHADER,   1, &vsSource),
        glCreateShaderProgramv(GL_GEOMETRY_SHADER, 1, &gsSource),
        glCreateShaderProgramv(GL_FRAGMENT_SHADER, 1, &fsSource),
    };

    GLuint pipeline;
    glGenProgramPipelines(1, &pipeline);
    glBindProgramPipeline(pipeline);

    glUseProgramStages(pipeline, GL_VERTEX_SHADER_BIT,   stages[0]);
    glUseProgramStages(pipeline, GL_GEOMETRY_SHADER_BIT, stages[1]);
    glUseProgramStages(pipeline, GL_FRAGMENT_SHADER_BIT, stages[2]);

    // glUniform* now heed the "active" shader program rather than glUseProgram
    // NOTE(review): fooLocation is assumed to have been queried elsewhere.
    glActiveShaderProgram(pipeline, stages[0]);
    glUniform1f(fooLocation, 1.0f);

    return pipeline;
}
Separable Programs
      ...

glProgramParameteri(programHandle, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE);
glLinkProgram(programHandle);

GLuint bufSize;
// NOTE(review): glGetProgramiv officially takes a GLint*; a GLuint lvalue
// is shown uncast here for brevity -- real code should use GLint or a cast.
glGetProgramiv(programHandle, GL_PROGRAM_BINARY_LENGTH, &bufSize);

// fixed: std::vector needs an element type; 'std::vector buffer(bufSize)'
// does not compile (CTAD cannot deduce the element type from a size).
std::vector<char> buffer(bufSize);

GLenum binaryFormat;
glGetProgramBinary(programHandle, bufSize, NULL, &binaryFormat, &buffer[0]);

// use a cached program on subsequent runs:
glProgramBinary(programHandle, binaryFormat, &buffer[0], bufSize);
Shader Binaries

Desktop OpenGL inherited this feature from OpenGL ES. Beware however; the binary format isn't portable at all. My personal preference is to avoid this feature unless I desperately need it.