/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

// Shared vertex/fragment shader code for primitives. Note: this is
// WebRender's custom shader-preprocessor include syntax, not C/C++.
#include rect,render_task,gpu_cache,transform

// Gradient extend modes. NOTE(review): values presumably must match the
// corresponding Rust-side enum — confirm against the renderer code.
#define EXTEND_MODE_CLAMP 0
#define EXTEND_MODE_REPEAT 1

// Sub-pixel text rendering directions.
#define SUBPX_DIR_NONE 0
#define SUBPX_DIR_HORIZONTAL 1
#define SUBPX_DIR_VERTICAL 2
#define SUBPX_DIR_MIXED 3

// Raster space selector for pictures.
#define RASTER_LOCAL 0
#define RASTER_SCREEN 1

// Targets produced by the previous render pass; do_clip() below reads the
// alpha target to sample clip masks.
uniform sampler2DArray sPrevPassAlpha;
uniform sampler2DArray sPrevPassColor;

// Clamp a point so that it lies inside the given rect (inclusive of edges).
vec2 clamp_rect(vec2 pt, RectWithSize rect) {
    return clamp(pt, rect.p0, rect.p0 + rect.size);
}

// TODO: convert back to RectWithEndPoint if driver issues are resolved, if ever.
flat varying vec4 vClipMaskUvBounds;
// XY and W are homogeneous coordinates, Z is the layer index
varying vec4 vClipMaskUv;

#ifdef WR_VERTEX_SHADER

// Color/blend modes for text runs and images.
#define COLOR_MODE_FROM_PASS 0
#define COLOR_MODE_ALPHA 1
#define COLOR_MODE_SUBPX_CONST_COLOR 2
#define COLOR_MODE_SUBPX_BG_PASS0 3
#define COLOR_MODE_SUBPX_BG_PASS1 4
#define COLOR_MODE_SUBPX_BG_PASS2 5
#define COLOR_MODE_SUBPX_DUAL_SOURCE 6
#define COLOR_MODE_BITMAP 7
#define COLOR_MODE_COLOR_BITMAP 8
#define COLOR_MODE_IMAGE 9

// Per-primitive header data, split into a float texture and an int texture.
uniform HIGHP_SAMPLER_FLOAT sampler2D sPrimitiveHeadersF;
uniform HIGHP_SAMPLER_FLOAT isampler2D sPrimitiveHeadersI;

// Instanced attributes
in ivec4 aData;

// Each primitive header occupies two texels in each header texture.
#define VECS_PER_PRIM_HEADER_F 2U
#define VECS_PER_PRIM_HEADER_I 2U

// Fields unpacked from the per-instance attribute aData.
struct Instance {
    int prim_header_address;
    int picture_task_address;
    int clip_address;
    int segment_index;
    int flags;
    int resource_address;
    int brush_kind;
};

// Unpack the bit-packed per-instance vertex attribute into its fields.
// Packing layout (must stay in sync with the CPU-side instance encoding):
//   aData.x           -> prim_header_address
//   aData.y [31:16]   -> picture_task_address,  [15:0] -> clip_address
//   aData.z [31:16]   -> flags,                 [15:0] -> segment_index
//   aData.w [31:24]   -> brush_kind,            [23:0] -> resource_address
Instance decode_instance_attributes() {
    Instance instance;
    instance.prim_header_address = aData.x;
    instance.picture_task_address = aData.y >> 16;
    instance.clip_address = aData.y & 0xffff;
    instance.segment_index = aData.z & 0xffff;
    instance.flags = aData.z >> 16;
    instance.resource_address = aData.w & 0xffffff;
    instance.brush_kind = aData.w >> 24;
    return instance;
}

// Decoded contents of a primitive header (see fetch_prim_header).
struct PrimitiveHeader {
    RectWithSize local_rect;
    RectWithSize local_clip_rect;
    float z;
    int specific_prim_address;
    int transform_id;
    ivec4 user_data;
};

// Fetch and decode the primitive header at the given index. The two rects
// come from the float header texture; z / addresses / user data come from
// the int header texture.
PrimitiveHeader fetch_prim_header(int index) {
    PrimitiveHeader ph;

    ivec2 uv_f = get_fetch_uv(index, VECS_PER_PRIM_HEADER_F);
    vec4 local_rect = TEXEL_FETCH(sPrimitiveHeadersF, uv_f, 0, ivec2(0, 0));
    vec4 local_clip_rect = TEXEL_FETCH(sPrimitiveHeadersF, uv_f, 0, ivec2(1, 0));
    ph.local_rect = RectWithSize(local_rect.xy, local_rect.zw);
    ph.local_clip_rect = RectWithSize(local_clip_rect.xy, local_clip_rect.zw);

    ivec2 uv_i = get_fetch_uv(index, VECS_PER_PRIM_HEADER_I);
    ivec4 data0 = TEXEL_FETCH(sPrimitiveHeadersI, uv_i, 0, ivec2(0, 0));
    ivec4 data1 = TEXEL_FETCH(sPrimitiveHeadersI, uv_i, 0, ivec2(1, 0));
    ph.z = float(data0.x);
    ph.specific_prim_address = data0.y;
    ph.transform_id = data0.z;
    ph.user_data = data1;

    return ph;
}

// Result of the write_vertex* helpers: the (clamped) local-space position
// and the homogeneous world-space position of the current vertex.
struct VertexInfo {
    vec2 local_pos;
    vec4 world_pos;
};

// Compute the output position for one corner of an axis-aligned primitive
// rect, clip it to the local clip rect, and place it in the render task's
// area of the target. Writes gl_Position as a side effect.
VertexInfo write_vertex(RectWithSize instance_rect,
                        RectWithSize local_clip_rect,
                        float z,
                        Transform transform,
                        PictureTask task) {
    // Select the corner of the local rect that we are processing.
    vec2 local_pos = instance_rect.p0 + instance_rect.size * aPosition.xy;

    // Clamp to the two local clip rects.
    vec2 clamped_local_pos = clamp_rect(local_pos, local_clip_rect);

    // Transform the current vertex to world space.
    vec4 world_pos = transform.m * vec4(clamped_local_pos, 0.0, 1.0);

    // Convert the world positions to device pixel space.
    vec2 device_pos = world_pos.xy * task.device_pixel_scale;

    // Apply offsets for the render task to get correct screen location.
    // The offset is scaled by world_pos.w because the xy components are
    // still homogeneous (divided by w later in the pipeline).
    vec2 final_offset = -task.content_origin + task.common_data.task_rect.p0;

    gl_Position = uTransform * vec4(device_pos + final_offset * world_pos.w, z * world_pos.w, world_pos.w);

    VertexInfo vi = VertexInfo(
        clamped_local_pos,
        world_pos
    );

    return vi;
}

// 2D cross product (z component of the 3D cross product of (v0,0) x (v1,0)).
float cross2(vec2 v0, vec2 v1) {
    return v0.x * v1.y - v0.y * v1.x;
}

// Return intersection of line (p0,p1) and line (p2,p3)
// Uses the homogeneous-coordinate line intersection formula; d == 0 means
// the lines are parallel (result is then non-finite).
vec2 intersect_lines(vec2 p0, vec2 p1, vec2 p2, vec2 p3) {
    vec2 d0 = p0 - p1;
    vec2 d1 = p2 - p3;

    float s0 = cross2(p0, p1);
    float s1 = cross2(p2, p3);

    float d = cross2(d0, d1);
    float nx = s0 * d1.x - d0.x * s1;
    float ny = s0 * d1.y - d0.y * s1;

    return vec2(nx / d, ny / d);
}

// Like write_vertex, but for primitives with a (possibly perspective)
// transform: the segment rect is extruded outwards along AA edges so the
// fragment shader can apply distance-based anti-aliasing, and the edge
// rect is passed to init_transform_vs for that purpose.
// clip_edge_mask selects (per edge: xy = top/left, zw = bottom/right)
// which edges are extruded / anti-aliased.
VertexInfo write_transform_vertex(RectWithSize local_segment_rect,
                                  RectWithSize local_prim_rect,
                                  RectWithSize local_clip_rect,
                                  vec4 clip_edge_mask,
                                  float z,
                                  Transform transform,
                                  PictureTask task) {
    // Calculate a clip rect from local_rect + local clip
    RectWithEndpoint clip_rect = to_rect_with_endpoint(local_clip_rect);
    RectWithEndpoint segment_rect = to_rect_with_endpoint(local_segment_rect);
    segment_rect.p0 = clamp(segment_rect.p0, clip_rect.p0, clip_rect.p1);
    segment_rect.p1 = clamp(segment_rect.p1, clip_rect.p0, clip_rect.p1);

    // Calculate a clip rect from local_rect + local clip
    RectWithEndpoint prim_rect = to_rect_with_endpoint(local_prim_rect);
    prim_rect.p0 = clamp(prim_rect.p0, clip_rect.p0, clip_rect.p1);
    prim_rect.p1 = clamp(prim_rect.p1, clip_rect.p0, clip_rect.p1);

    // As this is a transform shader, extrude by 2 (local space) pixels
    // in each direction. This gives enough space around the edge to
    // apply distance anti-aliasing. Technically, it:
    // (a) slightly over-estimates the number of required pixels in the simple case.
    // (b) might not provide enough edge in edge case perspective projections.
    // However, it's fast and simple. If / when we ever run into issues, we
    // can do some math on the projection matrix to work out a variable
    // amount to extrude.

    // Only extrude along edges where we are going to apply AA.
    float extrude_amount = 2.0;
    vec4 extrude_distance = vec4(extrude_amount) * clip_edge_mask;
    local_segment_rect.p0 -= extrude_distance.xy;
    local_segment_rect.size += extrude_distance.xy + extrude_distance.zw;

    // Select the corner of the local rect that we are processing.
    vec2 local_pos = local_segment_rect.p0 + local_segment_rect.size * aPosition.xy;

    // Convert the world positions to device pixel space.
    vec2 task_offset = task.common_data.task_rect.p0 - task.content_origin;

    // Transform the current vertex to the world space.
    vec4 world_pos = transform.m * vec4(local_pos, 0.0, 1.0);
    // task_offset is scaled by w because xy are still homogeneous here.
    vec4 final_pos = vec4(
        world_pos.xy * task.device_pixel_scale + task_offset * world_pos.w,
        z * world_pos.w,
        world_pos.w
    );

    gl_Position = uTransform * final_pos;

    // Per-edge blend between the primitive rect and the (clipped) segment
    // rect: AA edges use the segment bounds, non-AA edges the prim bounds.
    init_transform_vs(mix(
        vec4(prim_rect.p0, prim_rect.p1),
        vec4(segment_rect.p0, segment_rect.p1),
        clip_edge_mask
    ));

    VertexInfo vi = VertexInfo(
        local_pos,
        world_pos
    );

    return vi;
}

// Set up the clip-mask interpolators (vClipMaskUvBounds / vClipMaskUv) so
// the fragment shader's do_clip() can sample the mask for this clip area.
void write_clip(vec4 world_pos, ClipArea area) {
    vec2 uv = world_pos.xy * area.device_pixel_scale +
        world_pos.w * (area.common_data.task_rect.p0 - area.screen_origin);
    vClipMaskUvBounds = vec4(
        area.common_data.task_rect.p0,
        area.common_data.task_rect.p0 + area.common_data.task_rect.size
    );
    vClipMaskUv = vec4(uv, area.common_data.texture_layer_index, world_pos.w);
}

// Read the extra image data containing the homogeneous screen space coordinates
// of the corners, interpolate between them, and return real screen space UV.
vec2 get_image_quad_uv(int address, vec2 f) {
    ImageResourceExtra extra_data = fetch_image_resource_extra(address);
    // Bilinear interpolation in homogeneous coordinates, then perspective
    // divide to get the actual UV.
    vec4 x = mix(extra_data.st_tl, extra_data.st_tr, f.x);
    vec4 y = mix(extra_data.st_bl, extra_data.st_br, f.x);
    vec4 z = mix(x, y, f.y);
    return z.xy / z.w;
}
#endif //WR_VERTEX_SHADER

#ifdef WR_FRAGMENT_SHADER

// Return the clip-mask alpha for this fragment: 1.0 when there is no mask
// (dummy bounds), 0.0 outside the mask task rect, otherwise the sampled
// mask value from the previous pass alpha target.
float do_clip() {
    // check for the dummy bounds, which are given to the opaque objects
    if (vClipMaskUvBounds.xy == vClipMaskUvBounds.zw) {
        return 1.0;
    }
    // anything outside of the mask is considered transparent
    //Note: we assume gl_FragCoord.w == interpolated(1 / vClipMaskUv.w)
    vec2 mask_uv = vClipMaskUv.xy * gl_FragCoord.w;
    bvec2 left = lessThanEqual(vClipMaskUvBounds.xy, mask_uv); // inclusive
    bvec2 right = greaterThan(vClipMaskUvBounds.zw, mask_uv); // non-inclusive
    // bail out if the pixel is outside the valid bounds
    if (!all(bvec4(left, right))) {
        return 0.0;
    }
    // finally, the slow path - fetch the mask value from an image
    // Note the Z getting rounded to the nearest integer because the variable
    // is still interpolated and becomes a subject of precision-caused
    // fluctuations, see https://bugzilla.mozilla.org/show_bug.cgi?id=1491911
    ivec3 tc = ivec3(mask_uv, vClipMaskUv.z + 0.5);
    return texelFetch(sPrevPassAlpha, tc, 0).r;
}

#ifdef WR_FEATURE_DITHERING
// Apply ordered dithering using an 8x8 matrix stored in the sDither texture.
vec4 dither(vec4 color) {
    const int matrix_mask = 7;

    // Tile the 8x8 dither matrix across the framebuffer.
    ivec2 pos = ivec2(gl_FragCoord.xy) & ivec2(matrix_mask);
    float noise_normalized = (texelFetch(sDither, pos, 0).r * 255.0 + 0.5) / 64.0;
    float noise = (noise_normalized - 0.5) / 256.0; // scale down to the unit length

    // Perturb RGB only; alpha is left untouched.
    return color + vec4(noise, noise, noise, 0);
}
#else
// Dithering disabled: pass the color through unchanged.
vec4 dither(vec4 color) {
    return color;
}
#endif //WR_FEATURE_DITHERING

// Sample the gradient color LUT (stored in the GPU cache at `address`) at
// the given offset. gradient_repeat is 0.0 (clamp) or 1.0 (repeat).
vec4 sample_gradient(HIGHP_FS_ADDRESS int address, float offset, float gradient_repeat) {
    // Modulo the offset if the gradient repeats.
    float x = mix(offset, fract(offset), gradient_repeat);

    // Calculate the color entry index to use for this offset:
    //     offsets < 0 use the first color entry, 0
    //     offsets from [0, 1) use the color entries in the range of [1, N-1)
    //     offsets >= 1 use the last color entry, N-1
    //     so transform the range [0, 1) -> [1, N-1)

    // TODO(gw): In the future we might consider making the size of the
    // LUT vary based on number / distribution of stops in the gradient.
    const int GRADIENT_ENTRIES = 128;
    x = 1.0 + x * float(GRADIENT_ENTRIES);

    // Calculate the texel to index into the gradient color entries:
    //     floor(x) is the gradient color entry index
    //     fract(x) is the linear filtering factor between start and end
    int lut_offset = 2 * int(floor(x));     // There is a [start, end] color per entry.

    // Ensure we don't fetch outside the valid range of the LUT.
    lut_offset = clamp(lut_offset, 0, 2 * (GRADIENT_ENTRIES + 1));

    // Fetch the start and end color.
    vec4 texels[2] = fetch_from_gpu_cache_2(address + lut_offset);

    // Finally interpolate and apply dithering
    return dither(mix(texels[0], texels[1], fract(x)));
}

#endif //WR_FRAGMENT_SHADER