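// C++ translation of the ps_quad_gradient vertex and fragment shaders, in the
// form produced by a GLSL-to-C++ translator (the conventions match WebRender's
// SWGL glsl-to-cxx output). Unsuffixed vector types (vec2, vec4, Float, I32)
// carry four SIMD lanes at once; the *_scalar variants hold a single value
// shared by all lanes.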
struct ps_quad_gradient_common {
struct Samplers {
sampler2D_impl sGpuBufferF_impl;
int sGpuBufferF_slot;
isampler2D_impl sGpuBufferI_impl;
int sGpuBufferI_slot;
sampler2D_impl sRenderTasks_impl;
int sRenderTasks_slot;
sampler2D_impl sTransformPalette_impl;
int sTransformPalette_slot;
bool set_slot(int index, int value) {
switch (index) {
case 3:
sGpuBufferF_slot = value;
return true;
case 4:
sGpuBufferI_slot = value;
return true;
case 2:
sRenderTasks_slot = value;
return true;
case 1:
sTransformPalette_slot = value;
return true;
}
return false;
}
} samplers;
struct AttribLocations {
int aPosition = NULL_ATTRIB;
int aData = NULL_ATTRIB;
void bind_loc(const char* name, int index) {
if (strcmp("aPosition", name) == 0) { aPosition = index; return; }
if (strcmp("aData", name) == 0) { aData = index; return; }
}
int get_loc(const char* name) const {
if (strcmp("aPosition", name) == 0) { return aPosition != NULL_ATTRIB ? aPosition : -1; }
if (strcmp("aData", name) == 0) { return aData != NULL_ATTRIB ? aData : -1; }
return -1;
}
} attrib_locations;
vec4_scalar vTransformBounds;
vec4_scalar v_color;
ivec4_scalar v_flags;
ivec4_scalar v_gradient_header;
vec4_scalar v_flat_data;
vec4_scalar v_stop_offsets;
vec4_scalar v_color0;
vec4_scalar v_color1;
sampler2D sGpuBufferF;
isampler2D sGpuBufferI;
sampler2D sRenderTasks;
sampler2D sTransformPalette;
mat4_scalar uTransform;
void bind_textures() {
sGpuBufferF = lookup_sampler(&samplers.sGpuBufferF_impl, samplers.sGpuBufferF_slot);
sGpuBufferI = lookup_isampler(&samplers.sGpuBufferI_impl, samplers.sGpuBufferI_slot);
sRenderTasks = lookup_sampler(&samplers.sRenderTasks_impl, samplers.sRenderTasks_slot);
sTransformPalette = lookup_sampler(&samplers.sTransformPalette_impl, samplers.sTransformPalette_slot);
}
};
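// Vertex shader implementation. It decodes the packed quad instance, fetches
// primitive, transform and render-task data from the GPU buffer textures,
// computes the clip-space position, and fills in the gradient varyings
// consumed by the fragment shader below.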
struct ps_quad_gradient_vert : VertexShaderImpl, ps_quad_gradient_common {
private:
typedef ps_quad_gradient_vert Self;
// mat4_scalar uTransform;
vec2 aPosition;
// sampler2D sColor0;
// sampler2D sColor1;
// sampler2D sColor2;
struct RectWithSize_scalar {
vec2_scalar p0;
vec2_scalar size;
RectWithSize_scalar() = default;
RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){}
};
struct RectWithSize {
vec2 p0;
vec2 size;
RectWithSize() = default;
RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){}
RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){
}
IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){
}
friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize(
if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size));
}};
struct RectWithEndpoint_scalar {
vec2_scalar p0;
vec2_scalar p1;
RectWithEndpoint_scalar() = default;
RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){}
};
struct RectWithEndpoint {
vec2 p0;
vec2 p1;
RectWithEndpoint() = default;
RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){}
RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){
}
IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){
}
friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint(
if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1));
}};
// vec4_scalar vTransformBounds;
// sampler2D sTransformPalette;
struct Transform_scalar {
mat4_scalar m;
mat4_scalar inv_m;
bool is_axis_aligned;
Transform_scalar() = default;
Transform_scalar(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){}
};
struct Transform {
mat4 m;
mat4 inv_m;
Bool is_axis_aligned;
Transform() = default;
Transform(mat4 m, mat4 inv_m, Bool is_axis_aligned) : m(m), inv_m(inv_m), is_axis_aligned(is_axis_aligned){}
Transform(mat4_scalar m, mat4_scalar inv_m, bool is_axis_aligned):m(m),inv_m(inv_m),is_axis_aligned(is_axis_aligned){
}
IMPLICIT Transform(Transform_scalar s):m(s.m),inv_m(s.inv_m),is_axis_aligned(s.is_axis_aligned){
}
friend Transform if_then_else(I32 c, Transform t, Transform e) { return Transform(
if_then_else(c, t.m, e.m), if_then_else(c, t.inv_m, e.inv_m), if_then_else(c, t.is_axis_aligned, e.is_axis_aligned));
}};
// sampler2D sRenderTasks;
struct RenderTaskData_scalar {
RectWithEndpoint_scalar task_rect;
vec4_scalar user_data;
RenderTaskData_scalar() = default;
RenderTaskData_scalar(RectWithEndpoint_scalar task_rect, vec4_scalar user_data) : task_rect(task_rect), user_data(user_data){}
};
struct RenderTaskData {
RectWithEndpoint task_rect;
vec4 user_data;
RenderTaskData() = default;
RenderTaskData(RectWithEndpoint task_rect, vec4 user_data) : task_rect(task_rect), user_data(user_data){}
RenderTaskData(RectWithEndpoint_scalar task_rect, vec4_scalar user_data):task_rect(task_rect),user_data(user_data){
}
IMPLICIT RenderTaskData(RenderTaskData_scalar s):task_rect(s.task_rect),user_data(s.user_data){
}
friend RenderTaskData if_then_else(I32 c, RenderTaskData t, RenderTaskData e) { return RenderTaskData(
if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.user_data, e.user_data));
}};
struct PictureTask_scalar {
RectWithEndpoint_scalar task_rect;
float device_pixel_scale;
vec2_scalar content_origin;
PictureTask_scalar() = default;
PictureTask_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){}
};
struct PictureTask {
RectWithEndpoint task_rect;
Float device_pixel_scale;
vec2 content_origin;
PictureTask() = default;
PictureTask(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 content_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), content_origin(content_origin){}
PictureTask(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar content_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),content_origin(content_origin){
}
IMPLICIT PictureTask(PictureTask_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),content_origin(s.content_origin){
}
friend PictureTask if_then_else(I32 c, PictureTask t, PictureTask e) { return PictureTask(
if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.content_origin, e.content_origin));
}};
struct ClipArea_scalar {
RectWithEndpoint_scalar task_rect;
float device_pixel_scale;
vec2_scalar screen_origin;
ClipArea_scalar() = default;
ClipArea_scalar(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){}
};
struct ClipArea {
RectWithEndpoint task_rect;
Float device_pixel_scale;
vec2 screen_origin;
ClipArea() = default;
ClipArea(RectWithEndpoint task_rect, Float device_pixel_scale, vec2 screen_origin) : task_rect(task_rect), device_pixel_scale(device_pixel_scale), screen_origin(screen_origin){}
ClipArea(RectWithEndpoint_scalar task_rect, float device_pixel_scale, vec2_scalar screen_origin):task_rect(task_rect),device_pixel_scale(device_pixel_scale),screen_origin(screen_origin){
}
IMPLICIT ClipArea(ClipArea_scalar s):task_rect(s.task_rect),device_pixel_scale(s.device_pixel_scale),screen_origin(s.screen_origin){
}
friend ClipArea if_then_else(I32 c, ClipArea t, ClipArea e) { return ClipArea(
if_then_else(c, t.task_rect, e.task_rect), if_then_else(c, t.device_pixel_scale, e.device_pixel_scale), if_then_else(c, t.screen_origin, e.screen_origin));
}};
// sampler2D sGpuBufferF;
// isampler2D sGpuBufferI;
// vec4_scalar v_color;
// ivec4_scalar v_flags;
ivec4_scalar aData;
struct QuadSegment_scalar {
RectWithEndpoint_scalar rect;
RectWithEndpoint_scalar uv_rect;
QuadSegment_scalar() = default;
QuadSegment_scalar(RectWithEndpoint_scalar rect, RectWithEndpoint_scalar uv_rect) : rect(rect), uv_rect(uv_rect){}
};
struct QuadSegment {
RectWithEndpoint rect;
RectWithEndpoint uv_rect;
QuadSegment() = default;
QuadSegment(RectWithEndpoint rect, RectWithEndpoint uv_rect) : rect(rect), uv_rect(uv_rect){}
QuadSegment(RectWithEndpoint_scalar rect, RectWithEndpoint_scalar uv_rect):rect(rect),uv_rect(uv_rect){
}
IMPLICIT QuadSegment(QuadSegment_scalar s):rect(s.rect),uv_rect(s.uv_rect){
}
friend QuadSegment if_then_else(I32 c, QuadSegment t, QuadSegment e) { return QuadSegment(
if_then_else(c, t.rect, e.rect), if_then_else(c, t.uv_rect, e.uv_rect));
}};
struct PrimitiveInfo_scalar {
vec2_scalar local_pos;
RectWithEndpoint_scalar local_prim_rect;
RectWithEndpoint_scalar local_clip_rect;
QuadSegment_scalar segment;
int32_t edge_flags;
int32_t quad_flags;
ivec2_scalar pattern_input;
PrimitiveInfo_scalar() = default;
PrimitiveInfo_scalar(vec2_scalar local_pos, RectWithEndpoint_scalar local_prim_rect, RectWithEndpoint_scalar local_clip_rect, QuadSegment_scalar segment, int32_t edge_flags, int32_t quad_flags, ivec2_scalar pattern_input) : local_pos(local_pos), local_prim_rect(local_prim_rect), local_clip_rect(local_clip_rect), segment(segment), edge_flags(edge_flags), quad_flags(quad_flags), pattern_input(pattern_input){}
};
struct PrimitiveInfo {
vec2 local_pos;
RectWithEndpoint local_prim_rect;
RectWithEndpoint local_clip_rect;
QuadSegment segment;
I32 edge_flags;
I32 quad_flags;
ivec2 pattern_input;
PrimitiveInfo() = default;
PrimitiveInfo(vec2 local_pos, RectWithEndpoint local_prim_rect, RectWithEndpoint local_clip_rect, QuadSegment segment, I32 edge_flags, I32 quad_flags, ivec2 pattern_input) : local_pos(local_pos), local_prim_rect(local_prim_rect), local_clip_rect(local_clip_rect), segment(segment), edge_flags(edge_flags), quad_flags(quad_flags), pattern_input(pattern_input){}
PrimitiveInfo(vec2_scalar local_pos, RectWithEndpoint_scalar local_prim_rect, RectWithEndpoint_scalar local_clip_rect, QuadSegment_scalar segment, int32_t edge_flags, int32_t quad_flags, ivec2_scalar pattern_input):local_pos(local_pos),local_prim_rect(local_prim_rect),local_clip_rect(local_clip_rect),segment(segment),edge_flags(edge_flags),quad_flags(quad_flags),pattern_input(pattern_input){
}
IMPLICIT PrimitiveInfo(PrimitiveInfo_scalar s):local_pos(s.local_pos),local_prim_rect(s.local_prim_rect),local_clip_rect(s.local_clip_rect),segment(s.segment),edge_flags(s.edge_flags),quad_flags(s.quad_flags),pattern_input(s.pattern_input){
}
friend PrimitiveInfo if_then_else(I32 c, PrimitiveInfo t, PrimitiveInfo e) { return PrimitiveInfo(
if_then_else(c, t.local_pos, e.local_pos), if_then_else(c, t.local_prim_rect, e.local_prim_rect), if_then_else(c, t.local_clip_rect, e.local_clip_rect), if_then_else(c, t.segment, e.segment), if_then_else(c, t.edge_flags, e.edge_flags), if_then_else(c, t.quad_flags, e.quad_flags), if_then_else(c, t.pattern_input, e.pattern_input));
}};
struct QuadPrimitive_scalar {
RectWithEndpoint_scalar bounds;
RectWithEndpoint_scalar clip;
RectWithEndpoint_scalar uv_rect;
vec4_scalar pattern_scale_offset;
vec4_scalar color;
QuadPrimitive_scalar() = default;
QuadPrimitive_scalar(RectWithEndpoint_scalar bounds, RectWithEndpoint_scalar clip, RectWithEndpoint_scalar uv_rect, vec4_scalar pattern_scale_offset, vec4_scalar color) : bounds(bounds), clip(clip), uv_rect(uv_rect), pattern_scale_offset(pattern_scale_offset), color(color){}
};
struct QuadPrimitive {
RectWithEndpoint bounds;
RectWithEndpoint clip;
RectWithEndpoint uv_rect;
vec4 pattern_scale_offset;
vec4 color;
QuadPrimitive() = default;
QuadPrimitive(RectWithEndpoint bounds, RectWithEndpoint clip, RectWithEndpoint uv_rect, vec4 pattern_scale_offset, vec4 color) : bounds(bounds), clip(clip), uv_rect(uv_rect), pattern_scale_offset(pattern_scale_offset), color(color){}
QuadPrimitive(RectWithEndpoint_scalar bounds, RectWithEndpoint_scalar clip, RectWithEndpoint_scalar uv_rect, vec4_scalar pattern_scale_offset, vec4_scalar color):bounds(bounds),clip(clip),uv_rect(uv_rect),pattern_scale_offset(pattern_scale_offset),color(color){
}
IMPLICIT QuadPrimitive(QuadPrimitive_scalar s):bounds(s.bounds),clip(s.clip),uv_rect(s.uv_rect),pattern_scale_offset(s.pattern_scale_offset),color(s.color){
}
friend QuadPrimitive if_then_else(I32 c, QuadPrimitive t, QuadPrimitive e) { return QuadPrimitive(
if_then_else(c, t.bounds, e.bounds), if_then_else(c, t.clip, e.clip), if_then_else(c, t.uv_rect, e.uv_rect), if_then_else(c, t.pattern_scale_offset, e.pattern_scale_offset), if_then_else(c, t.color, e.color));
}};
struct QuadHeader_scalar {
int32_t transform_id;
int32_t z_id;
ivec2_scalar pattern_input;
QuadHeader_scalar() = default;
QuadHeader_scalar(int32_t transform_id, int32_t z_id, ivec2_scalar pattern_input) : transform_id(transform_id), z_id(z_id), pattern_input(pattern_input){}
};
struct QuadHeader {
I32 transform_id;
I32 z_id;
ivec2 pattern_input;
QuadHeader() = default;
QuadHeader(I32 transform_id, I32 z_id, ivec2 pattern_input) : transform_id(transform_id), z_id(z_id), pattern_input(pattern_input){}
QuadHeader(int32_t transform_id, int32_t z_id, ivec2_scalar pattern_input):transform_id(transform_id),z_id(z_id),pattern_input(pattern_input){
}
IMPLICIT QuadHeader(QuadHeader_scalar s):transform_id(s.transform_id),z_id(s.z_id),pattern_input(s.pattern_input){
}
friend QuadHeader if_then_else(I32 c, QuadHeader t, QuadHeader e) { return QuadHeader(
if_then_else(c, t.transform_id, e.transform_id), if_then_else(c, t.z_id, e.z_id), if_then_else(c, t.pattern_input, e.pattern_input));
}};
struct QuadInstance_scalar {
int32_t prim_address_i;
int32_t prim_address_f;
int32_t quad_flags;
int32_t edge_flags;
int32_t part_index;
int32_t segment_index;
int32_t picture_task_address;
QuadInstance_scalar() = default;
QuadInstance_scalar(int32_t prim_address_i, int32_t prim_address_f, int32_t quad_flags, int32_t edge_flags, int32_t part_index, int32_t segment_index, int32_t picture_task_address) : prim_address_i(prim_address_i), prim_address_f(prim_address_f), quad_flags(quad_flags), edge_flags(edge_flags), part_index(part_index), segment_index(segment_index), picture_task_address(picture_task_address){}
};
struct QuadInstance {
I32 prim_address_i;
I32 prim_address_f;
I32 quad_flags;
I32 edge_flags;
I32 part_index;
I32 segment_index;
I32 picture_task_address;
QuadInstance() = default;
QuadInstance(I32 prim_address_i, I32 prim_address_f, I32 quad_flags, I32 edge_flags, I32 part_index, I32 segment_index, I32 picture_task_address) : prim_address_i(prim_address_i), prim_address_f(prim_address_f), quad_flags(quad_flags), edge_flags(edge_flags), part_index(part_index), segment_index(segment_index), picture_task_address(picture_task_address){}
QuadInstance(int32_t prim_address_i, int32_t prim_address_f, int32_t quad_flags, int32_t edge_flags, int32_t part_index, int32_t segment_index, int32_t picture_task_address):prim_address_i(prim_address_i),prim_address_f(prim_address_f),quad_flags(quad_flags),edge_flags(edge_flags),part_index(part_index),segment_index(segment_index),picture_task_address(picture_task_address){
}
IMPLICIT QuadInstance(QuadInstance_scalar s):prim_address_i(s.prim_address_i),prim_address_f(s.prim_address_f),quad_flags(s.quad_flags),edge_flags(s.edge_flags),part_index(s.part_index),segment_index(s.segment_index),picture_task_address(s.picture_task_address){
}
friend QuadInstance if_then_else(I32 c, QuadInstance t, QuadInstance e) { return QuadInstance(
if_then_else(c, t.prim_address_i, e.prim_address_i), if_then_else(c, t.prim_address_f, e.prim_address_f), if_then_else(c, t.quad_flags, e.quad_flags), if_then_else(c, t.edge_flags, e.edge_flags), if_then_else(c, t.part_index, e.part_index), if_then_else(c, t.segment_index, e.segment_index), if_then_else(c, t.picture_task_address, e.picture_task_address));
}};
struct VertexInfo_scalar {
vec2_scalar local_pos;
VertexInfo_scalar() = default;
explicit VertexInfo_scalar(vec2_scalar local_pos) : local_pos(local_pos){}
};
struct VertexInfo {
vec2 local_pos;
VertexInfo() = default;
explicit VertexInfo(vec2 local_pos) : local_pos(local_pos){}
explicit VertexInfo(vec2_scalar local_pos):local_pos(local_pos){
}
IMPLICIT VertexInfo(VertexInfo_scalar s):local_pos(s.local_pos){
}
friend VertexInfo if_then_else(I32 c, VertexInfo t, VertexInfo e) { return VertexInfo(
if_then_else(c, t.local_pos, e.local_pos));
}};
// ivec4_scalar v_gradient_header;
vec4 v_interpolated_data;
// vec4_scalar v_flat_data;
// vec4_scalar v_stop_offsets;
// vec4_scalar v_color0;
// vec4_scalar v_color1;
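// aData packs one quad instance: x and y are the int/float GPU-buffer
// addresses, z holds four 8-bit fields (quad_flags, edge_flags, part_index,
// segment_index from high to low byte), and w is the picture task address.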
QuadInstance_scalar decode_instance() {
QuadInstance_scalar qi = QuadInstance_scalar((aData).x, (aData).y, (((aData).z)>>(24))&(255), (((aData).z)>>(16))&(255), (((aData).z)>>(8))&(255), (((aData).z)>>(0))&(255), (aData).w);
return qi;
}
ivec2_scalar get_gpu_buffer_uv(int32_t address) {
return make_ivec2((make_uint(address))%(1024u), (make_uint(address))/(1024u));
}
ivec4_scalar fetch_from_gpu_buffer_1i(int32_t address) {
ivec2_scalar uv = get_gpu_buffer_uv(address);
return texelFetch(sGpuBufferI, uv, 0);
}
QuadHeader_scalar fetch_header(int32_t address) {
ivec4_scalar header = fetch_from_gpu_buffer_1i(address);
QuadHeader_scalar qh = QuadHeader_scalar((header).x, (header).y, (header).sel(Z,W));
return qh;
}
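// Transform ids use bit 23 to flag a non-axis-aligned transform; the low 23
// bits index sTransformPalette, where each entry spans 8 texels: four rows of
// the forward matrix followed by four rows of its inverse.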
Transform_scalar fetch_transform(int32_t id) {
Transform_scalar transform;
(transform).is_axis_aligned = ((id)>>(23))==(0);
int32_t index = (id)&(8388607);
ivec2_scalar uv = make_ivec2(make_int((8u)*((make_uint(index))%((1024u)/(8u)))), make_int((make_uint(index))/((1024u)/(8u))));
ivec2_scalar uv0 = make_ivec2(((uv).x)+(0), (uv).y);
auto sTransformPalette_uv0_fetch = texelFetchPtr(sTransformPalette, uv0, 0, 7, 0, 0);
(transform).m[0] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 0, 0);
(transform).m[1] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 1, 0);
(transform).m[2] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 2, 0);
(transform).m[3] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 3, 0);
(transform).inv_m[0] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 4, 0);
(transform).inv_m[1] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 5, 0);
(transform).inv_m[2] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 6, 0);
(transform).inv_m[3] = texelFetchUnchecked(sTransformPalette, sTransformPalette_uv0_fetch, 7, 0);
return transform;
}
RenderTaskData_scalar fetch_render_task_data(int32_t index) {
ivec2_scalar uv = make_ivec2(make_int((2u)*((make_uint(index))%((1024u)/(2u)))), make_int((make_uint(index))/((1024u)/(2u))));
auto sRenderTasks_uv_fetch = texelFetchPtr(sRenderTasks, uv, 0, 1, 0, 0);
vec4_scalar texel0 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 0, 0);
vec4_scalar texel1 = texelFetchUnchecked(sRenderTasks, sRenderTasks_uv_fetch, 1, 0);
RectWithEndpoint_scalar task_rect = RectWithEndpoint_scalar((texel0).sel(X,Y), (texel0).sel(Z,W));
RenderTaskData_scalar data = RenderTaskData_scalar(task_rect, texel1);
return data;
}
PictureTask_scalar fetch_picture_task(int32_t address) {
RenderTaskData_scalar task_data = fetch_render_task_data(address);
PictureTask_scalar task = PictureTask_scalar((task_data).task_rect, ((task_data).user_data).x, ((task_data).user_data).sel(Y,Z));
return task;
}
Array<vec4_scalar,5> fetch_from_gpu_buffer_5f(int32_t address) {
ivec2_scalar uv = get_gpu_buffer_uv(address);
auto sGpuBufferF_uv_fetch = texelFetchPtr(sGpuBufferF, uv, 0, 4, 0, 0);
return Array<vec4_scalar,5>{{texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 0, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 1, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 2, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 3, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 4, 0)}};
}
QuadPrimitive_scalar fetch_primitive(int32_t index) {
QuadPrimitive_scalar prim;
Array<vec4_scalar,5> texels = fetch_from_gpu_buffer_5f(index);
(prim).bounds = RectWithEndpoint_scalar((texels[0]).sel(X,Y), (texels[0]).sel(Z,W));
(prim).clip = RectWithEndpoint_scalar((texels[1]).sel(X,Y), (texels[1]).sel(Z,W));
(prim).uv_rect = RectWithEndpoint_scalar((texels[2]).sel(X,Y), (texels[2]).sel(Z,W));
(prim).pattern_scale_offset = texels[3];
(prim).color = texels[4];
return prim;
}
Array<vec4_scalar,2> fetch_from_gpu_buffer_2f(int32_t address) {
ivec2_scalar uv = get_gpu_buffer_uv(address);
auto sGpuBufferF_uv_fetch = texelFetchPtr(sGpuBufferF, uv, 0, 1, 0, 0);
return Array<vec4_scalar,2>{{texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 0, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 1, 0)}};
}
QuadSegment_scalar fetch_segment(int32_t base, int32_t index) {
QuadSegment_scalar seg;
Array<vec4_scalar,2> texels = fetch_from_gpu_buffer_2f(((base)+(5))+((index)*(2)));
(seg).rect = RectWithEndpoint_scalar((texels[0]).sel(X,Y), (texels[0]).sel(Z,W));
(seg).uv_rect = RectWithEndpoint_scalar((texels[1]).sel(X,Y), (texels[1]).sel(Z,W));
return seg;
}
float edge_aa_offset(int32_t edge, int32_t flags) {
return ((flags)&(edge))!=(0) ? 2.f : 0.f;
}
vec2 rect_clamp(RectWithEndpoint_scalar rect, vec2 pt) {
return clamp(pt, (rect).p0, (rect).p1);
}
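// Transforms the local position into device space; if quad_flags bit 1 is set
// the device position is clamped to the task rect and mapped back through the
// inverse transform so vi.local_pos stays consistent. gl_Position is emitted
// relative to the render task origin.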
VertexInfo write_vertex(vec2 local_pos, float z, Transform_scalar transform, vec2_scalar content_origin, RectWithEndpoint_scalar task_rect, float device_pixel_scale, int32_t quad_flags) {
VertexInfo vi;
vec4 world_pos = ((transform).m)*(make_vec4(local_pos, 0.f, 1.f));
vec2 device_pos = ((world_pos).sel(X,Y))*(device_pixel_scale);
if (((quad_flags)&(2))!=(0)) {
{
RectWithEndpoint_scalar device_clip_rect = RectWithEndpoint_scalar(content_origin, ((content_origin)+((task_rect).p1))-((task_rect).p0));
device_pos = rect_clamp(device_clip_rect, device_pos);
(vi).local_pos = (((transform).inv_m)*(make_vec4((device_pos)/(device_pixel_scale), 0.f, 1.f))).sel(X,Y);
}
} else {
(vi).local_pos = local_pos;
}
vec2_scalar final_offset = (-(content_origin))+((task_rect).p0);
gl_Position = (uTransform)*(make_vec4((device_pos)+((final_offset)*((world_pos).w)), (z)*((world_pos).w), (world_pos).w));
return vi;
}
vec2_scalar scale_offset_map_point(vec4_scalar scale_offset, vec2_scalar p) {
return ((p)*((scale_offset).sel(X,Y)))+((scale_offset).sel(Z,W));
}
RectWithEndpoint_scalar scale_offset_map_rect(vec4_scalar scale_offset, RectWithEndpoint_scalar r) {
return RectWithEndpoint_scalar(scale_offset_map_point(scale_offset, (r).p0), scale_offset_map_point(scale_offset, (r).p1));
}
vec2 scale_offset_map_point(vec4_scalar scale_offset, vec2 p) {
return ((p)*((scale_offset).sel(X,Y)))+((scale_offset).sel(Z,W));
}
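// Builds the PrimitiveInfo for this vertex: decodes the instance, selects the
// segment (index 255 means "whole primitive"), clips the coverage rect against
// the primitive clip, carves out the 2px edge-AA strip selected by part_index,
// and finally maps positions through the pattern scale/offset.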
PrimitiveInfo quad_primive_info(void) {
QuadInstance_scalar qi = decode_instance();
QuadHeader_scalar qh = fetch_header((qi).prim_address_i);
Transform_scalar transform = fetch_transform((qh).transform_id);
PictureTask_scalar task = fetch_picture_task((qi).picture_task_address);
QuadPrimitive_scalar prim = fetch_primitive((qi).prim_address_f);
float z = make_float((qh).z_id);
QuadSegment_scalar seg;
if (((qi).segment_index)==(255)) {
{
(seg).rect = (prim).bounds;
(seg).uv_rect = (prim).uv_rect;
}
} else {
seg = fetch_segment((qi).prim_address_f, (qi).segment_index);
}
RectWithEndpoint_scalar local_coverage_rect = (seg).rect;
(local_coverage_rect).p0 = max((local_coverage_rect).p0, ((prim).clip).p0);
(local_coverage_rect).p1 = min((local_coverage_rect).p1, ((prim).clip).p1);
(local_coverage_rect).p1 = max((local_coverage_rect).p0, (local_coverage_rect).p1);
switch ((qi).part_index) {
case 1:
((local_coverage_rect).p1).x = (((local_coverage_rect).p0).x)+(2.f);
swgl_antiAlias(1);
break;
case 2:
((local_coverage_rect).p0).x = (((local_coverage_rect).p0).x)+(2.f);
((local_coverage_rect).p1).x = (((local_coverage_rect).p1).x)-(2.f);
((local_coverage_rect).p1).y = (((local_coverage_rect).p0).y)+(2.f);
swgl_antiAlias(2);
break;
case 3:
((local_coverage_rect).p0).x = (((local_coverage_rect).p1).x)-(2.f);
swgl_antiAlias(4);
break;
case 4:
((local_coverage_rect).p0).x = (((local_coverage_rect).p0).x)+(2.f);
((local_coverage_rect).p1).x = (((local_coverage_rect).p1).x)-(2.f);
((local_coverage_rect).p0).y = (((local_coverage_rect).p1).y)-(2.f);
swgl_antiAlias(8);
break;
case 0:
((local_coverage_rect).p0).x += edge_aa_offset(1, (qi).edge_flags);
((local_coverage_rect).p1).x -= edge_aa_offset(4, (qi).edge_flags);
((local_coverage_rect).p0).y += edge_aa_offset(2, (qi).edge_flags);
((local_coverage_rect).p1).y -= edge_aa_offset(8, (qi).edge_flags);
break;
case 5:
default:
swgl_antiAlias((qi).edge_flags);
break;
}
vec2 local_pos = mix((local_coverage_rect).p0, (local_coverage_rect).p1, aPosition);
float device_pixel_scale = (task).device_pixel_scale;
if ((((qi).quad_flags)&(4))!=(0)) {
{
device_pixel_scale = 1.f;
}
}
VertexInfo vi = write_vertex(local_pos, z, transform, (task).content_origin, (task).task_rect, device_pixel_scale, (qi).quad_flags);
v_color = (prim).color;
vec4_scalar pattern_tx = (prim).pattern_scale_offset;
(seg).rect = scale_offset_map_rect(pattern_tx, (seg).rect);
return PrimitiveInfo(scale_offset_map_point(pattern_tx, (vi).local_pos), scale_offset_map_rect(pattern_tx, (prim).bounds), scale_offset_map_rect(pattern_tx, (prim).clip), seg, (qi).edge_flags, (qi).quad_flags, (qh).pattern_input);
}
void antialiasing_vertex(PrimitiveInfo prim) {
}
ivec2 get_gpu_buffer_uv(I32 address) {
return make_ivec2((make_uint(address))%(1024u), (make_uint(address))/(1024u));
}
Array<vec4,3> fetch_from_gpu_buffer_3f(I32 address) {
ivec2 uv = get_gpu_buffer_uv(address);
auto sGpuBufferF_uv_fetch = texelFetchPtr(sGpuBufferF, uv, 0, 2, 0, 0);
return Array<vec4,3>{{texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 0, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 1, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 2, 0)}};
}
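// Gradient header layout: x = kind (0 linear, 1 radial, 2 conic), y = stop
// count, z = extend mode, w = address of the stop colors (base_address + 1).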
ivec4 decode_gradient_header(I32 base_address, vec4 payload) {
I32 kind = make_int((payload).x);
I32 count = make_int((payload).y);
I32 extend_mode = make_int((payload).z);
I32 colors_address = (base_address)+(1);
return make_ivec4(kind, count, extend_mode, colors_address);
}
void linear_gradient_vertex(vec2 position, vec4 data0, I32 _cond_mask_) {
vec2 p0 = (data0).sel(X,Y);
vec2 p1 = (data0).sel(Z,W);
vec2 dir = (p1)-(p0);
dir = if_then_else(_cond_mask_,(dir)/(dot(dir, dir)),dir);
Float offset = dot(p0, dir);
v_interpolated_data = if_then_else(_cond_mask_,make_vec4(position, 0.f, 0.f),v_interpolated_data);
if (_cond_mask_[0]) { v_flat_data = force_scalar(make_vec4(dir, offset, 0.f)); };
}
void radial_gradient_vertex(vec2 position, vec4 data0, vec4 data1, I32 _cond_mask_) {
vec2 center = (data0).sel(X,Y);
vec2 scale = (data0).sel(Z,W);
Float start_radius = (data1).x;
Float end_radius = (data1).y;
Float xy_ratio = (data1).z;
Float rd = (end_radius)-(start_radius);
Float radius_scale = if_then_else((rd)!=(0.f), (1.f)/(rd), 0.f);
start_radius = if_then_else(_cond_mask_,(start_radius)*(radius_scale),start_radius);
vec2 normalized_pos = (((position)*(scale))-(center))*(radius_scale);
(normalized_pos).y = if_then_else(_cond_mask_,(normalized_pos).y*xy_ratio,(normalized_pos).y);
v_interpolated_data = if_then_else(_cond_mask_,make_vec4((normalized_pos).x, (normalized_pos).y, 0.f, 0.f),v_interpolated_data);
if (_cond_mask_[0]) { v_flat_data = force_scalar(make_vec4(start_radius, 0.f, 0.f, 0.f)); };
}
void conic_gradient_vertex(vec2 position, vec4 data0, vec4 data1, I32 _cond_mask_) {
vec2 center = (data0).sel(X,Y);
vec2 scale = (data0).sel(Z,W);
Float start_offset = (data1).x;
Float end_offset = (data1).y;
Float angle = ((3.1415927f)/(2.f))-((data1).z);
Float d = (end_offset)-(start_offset);
Float offset_scale = if_then_else((d)!=(0.f), (1.f)/(d), 0.f);
start_offset = if_then_else(_cond_mask_,(start_offset)*(offset_scale),start_offset);
vec2 dir = ((position)*(scale))-(center);
v_interpolated_data = if_then_else(_cond_mask_,make_vec4(dir, start_offset, offset_scale),v_interpolated_data);
if (_cond_mask_[0]) { v_flat_data = force_scalar(make_vec4(angle, 0.f, 0.f, 0.f)); };
}
vec4 fetch_from_gpu_buffer_1f(I32 address) {
ivec2 uv = get_gpu_buffer_uv(address);
return texelFetch(sGpuBufferF, uv, 0);
}
Array<vec4,2> fetch_from_gpu_buffer_2f(I32 address) {
ivec2 uv = get_gpu_buffer_uv(address);
auto sGpuBufferF_uv_fetch = texelFetchPtr(sGpuBufferF, uv, 0, 1, 0, 0);
return Array<vec4,2>{{texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 0, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 1, 0)}};
}
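// Fetches the 3-texel gradient record addressed by pattern_input.x, decodes
// its header, and runs the matching *_gradient_vertex under a lane mask. It
// then loads the root stop offsets and, for two-stop gradients, the two stop
// colors into flat varyings.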
void pattern_vertex(PrimitiveInfo info) {
I32 address = ((info).pattern_input).x;
Array<vec4,3> gradient = fetch_from_gpu_buffer_3f(address);
ivec4 header = decode_gradient_header((address)+(2), gradient[2]);
vec2 pos = ((info).local_pos)-(((info).local_prim_rect).p0);
auto _c9_ = ((header).x)==(0);
{
linear_gradient_vertex(pos, gradient[0], _c9_);
}
auto _c10_ = ((header).x)==(1);
auto _c11_ = (~(_c9_))&(_c10_);
{
radial_gradient_vertex(pos, gradient[0], gradient[1], _c11_);
}
_c10_ = (~(_c9_))&(~(_c10_));
auto _c12_ = ((header).x)==(2);
auto _c13_ = (_c10_)&(_c12_);
{
conic_gradient_vertex(pos, gradient[0], gradient[1], _c13_);
}
_c12_ = (_c10_)&(~(_c12_));
if (true) {
{
v_interpolated_data = if_then_else(_c12_,make_vec4(0.f),v_interpolated_data);
if (_c12_[0]) { v_flat_data = make_vec4(0.f); };
}
}
I32 count = (header).y;
I32 colors_addr = (header).w;
I32 offsets_addrs = (colors_addr)+(count);
v_stop_offsets = force_scalar(fetch_from_gpu_buffer_1f(offsets_addrs));
v_gradient_header = force_scalar(header);
auto _c14_ = (count)==(2);
{
Array<vec4,2> colors = fetch_from_gpu_buffer_2f(colors_addr);
if (_c14_[0]) { v_color0 = force_scalar(colors[0]); };
if (_c14_[0]) { v_color1 = force_scalar(colors[1]); };
}
}
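// Vertex entry point: v_flags.z is set when quad_flags bit 4 is set, which the
// fragment shader uses to replicate the red channel across RGBA.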
ALWAYS_INLINE void main() {
PrimitiveInfo prim = quad_primive_info();
auto _c2_ = (((prim).quad_flags)&(16))!=(0);
{
if (_c2_[0]) { (v_flags).z = 1; };
}
{
if (~(_c2_)[0]) { (v_flags).z = 0; };
}
antialiasing_vertex(prim);
pattern_vertex(prim);
}
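// Uniform plumbing: sampler uniforms are routed to texture slots through
// samplers.set_slot(); only uTransform (uniform index 5) accepts matrix data,
// every other index asserts.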
static void set_uniform_1i(VertexShaderImpl* impl, int index, int value) {
Self* self = (Self*)impl;
if (self->samplers.set_slot(index, value)) return;
switch (index) {
case 3:
assert(0); // sGpuBufferF
break;
case 4:
assert(0); // sGpuBufferI
break;
case 2:
assert(0); // sRenderTasks
break;
case 1:
assert(0); // sTransformPalette
break;
case 5:
assert(0); // uTransform
break;
}
}
static void set_uniform_4fv(VertexShaderImpl* impl, int index, const float *value) {
Self* self = (Self*)impl;
switch (index) {
case 3:
assert(0); // sGpuBufferF
break;
case 4:
assert(0); // sGpuBufferI
break;
case 2:
assert(0); // sRenderTasks
break;
case 1:
assert(0); // sTransformPalette
break;
case 5:
assert(0); // uTransform
break;
}
}
static void set_uniform_matrix4fv(VertexShaderImpl* impl, int index, const float *value) {
Self* self = (Self*)impl;
switch (index) {
case 3:
assert(0); // sGpuBufferF
break;
case 4:
assert(0); // sGpuBufferI
break;
case 2:
assert(0); // sRenderTasks
break;
case 1:
assert(0); // sTransformPalette
break;
case 5:
self->uTransform = mat4_scalar::load_from_ptr(value);
break;
}
}
static void load_attribs(VertexShaderImpl* impl, VertexAttrib *attribs, uint32_t start, int instance, int count) {Self* self = (Self*)impl;
load_attrib(self->aPosition, attribs[self->attrib_locations.aPosition], start, instance, count);
load_flat_attrib(self->aData, attribs[self->attrib_locations.aData], start, instance, count);
}
public:
struct InterpOutputs {
vec4_scalar v_interpolated_data;
};
private:
ALWAYS_INLINE void store_interp_outputs(char* dest_ptr, size_t stride) {
for(int n = 0; n < 4; n++) {
auto* dest = reinterpret_cast<InterpOutputs*>(dest_ptr);
dest->v_interpolated_data = get_nth(v_interpolated_data, n);
dest_ptr += stride;
}
}
static void run(VertexShaderImpl* impl, char* interps, size_t interp_stride) {
Self* self = (Self*)impl;
self->main();
self->store_interp_outputs(interps, interp_stride);
}
static void init_batch(VertexShaderImpl* impl) {
Self* self = (Self*)impl; self->bind_textures(); }
public:
ps_quad_gradient_vert() {
set_uniform_1i_func = &set_uniform_1i;
set_uniform_4fv_func = &set_uniform_4fv;
set_uniform_matrix4fv_func = &set_uniform_matrix4fv;
init_batch_func = &init_batch;
load_attribs_func = &load_attribs;
run_primitive_func = &run;
}
};
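// Fragment shader implementation. It computes the gradient offset for the
// selected kind, applies the extend mode, and resolves the color either with
// the two-stop fast path or the stop-offset tree.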
struct ps_quad_gradient_frag : FragmentShaderImpl, ps_quad_gradient_vert {
private:
typedef ps_quad_gradient_frag Self;
#define oFragColor gl_FragColor
// vec4 oFragColor;
// sampler2D sColor0;
// sampler2D sColor1;
// sampler2D sColor2;
struct RectWithSize_scalar {
vec2_scalar p0;
vec2_scalar size;
RectWithSize_scalar() = default;
RectWithSize_scalar(vec2_scalar p0, vec2_scalar size) : p0(p0), size(size){}
};
struct RectWithSize {
vec2 p0;
vec2 size;
RectWithSize() = default;
RectWithSize(vec2 p0, vec2 size) : p0(p0), size(size){}
RectWithSize(vec2_scalar p0, vec2_scalar size):p0(p0),size(size){
}
IMPLICIT RectWithSize(RectWithSize_scalar s):p0(s.p0),size(s.size){
}
friend RectWithSize if_then_else(I32 c, RectWithSize t, RectWithSize e) { return RectWithSize(
if_then_else(c, t.p0, e.p0), if_then_else(c, t.size, e.size));
}};
struct RectWithEndpoint_scalar {
vec2_scalar p0;
vec2_scalar p1;
RectWithEndpoint_scalar() = default;
RectWithEndpoint_scalar(vec2_scalar p0, vec2_scalar p1) : p0(p0), p1(p1){}
};
struct RectWithEndpoint {
vec2 p0;
vec2 p1;
RectWithEndpoint() = default;
RectWithEndpoint(vec2 p0, vec2 p1) : p0(p0), p1(p1){}
RectWithEndpoint(vec2_scalar p0, vec2_scalar p1):p0(p0),p1(p1){
}
IMPLICIT RectWithEndpoint(RectWithEndpoint_scalar s):p0(s.p0),p1(s.p1){
}
friend RectWithEndpoint if_then_else(I32 c, RectWithEndpoint t, RectWithEndpoint e) { return RectWithEndpoint(
if_then_else(c, t.p0, e.p0), if_then_else(c, t.p1, e.p1));
}};
// vec4_scalar vTransformBounds;
// sampler2D sGpuBufferF;
// isampler2D sGpuBufferI;
// vec4_scalar v_color;
// ivec4_scalar v_flags;
// ivec4_scalar v_gradient_header;
vec4 v_interpolated_data;
// vec4_scalar v_flat_data;
// vec4_scalar v_stop_offsets;
// vec4_scalar v_color0;
// vec4_scalar v_color1;
float antialiasing_fragment() {
float alpha = 1.f;
return alpha;
}
Float linear_gradient_fragment() {
vec2 pos = (v_interpolated_data).sel(X,Y);
vec2_scalar scale_dir = (v_flat_data).sel(X,Y);
float start_offset = (v_flat_data).z;
return (dot(pos, scale_dir))-(start_offset);
}
Float radial_gradient_fragment() {
vec2 pos = (v_interpolated_data).sel(X,Y);
float start_radius = (v_flat_data).x;
return (length(pos))-(start_radius);
}
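// Polynomial approximation of atan2 used by the conic gradient; the two mix()
// calls fold the result into the correct quadrant and the final if_then_else
// restores the sign of y.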
Float approx_atan2(Float y, Float x) {
vec2 a = abs(make_vec2(x, y));
Float slope = (min((a).x, (a).y))/(max((a).x, (a).y));
Float s2 = (slope)*(slope);
Float r = (((((((-(0.046496473f))*(s2))+(0.15931422f))*(s2))-(0.32762277f))*(s2))*(slope))+(slope);
r = mix(r, (1.5707964f)-(r), make_float((make_int(((a).y)>((a).x)))&(1)));
r = mix(r, (3.1415927f)-(r), make_float((make_int((x)<(0.f)))&(1)));
r = if_then_else((y)<(0.f), -(r), r);
return r;
}
Float conic_gradient_fragment() {
vec2 current_dir = (v_interpolated_data).sel(X,Y);
Float start_offset = (v_interpolated_data).z;
Float offset_scale = (v_interpolated_data).w;
float angle = (v_flat_data).x;
Float current_angle = (approx_atan2((current_dir).y, (current_dir).x))+(angle);
return ((fract((current_angle)/((2.f)*(3.1415927f))))*(offset_scale))-(start_offset);
}
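// Extend mode: v_gradient_header.z is non-zero for repeat, so subtracting
// floor(offset) * mode wraps the offset into [0, 1); a zero mode leaves the
// offset unchanged, to be clamped by the stop sampling below.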
Float apply_extend_mode(Float offset) {
float mode = make_float((v_gradient_header).z);
offset -= (floor(offset))*(mode);
return offset;
}
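// Fast path for gradients with at most two stops: interpolates between
// v_color0 and v_color1 using the stop offsets cached in v_stop_offsets.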
vec4 sample_gradient_stops_fast(Float offset) {
Float d = ((v_stop_offsets).y)-((v_stop_offsets).x);
Float factor = 0.f;
auto _c5_ = (offset)<((v_stop_offsets).x);
{
factor = if_then_else(_c5_,0.f,factor);
d = if_then_else(_c5_,1.f,d);
}
auto _c6_ = (offset)>((v_stop_offsets).y);
auto _c7_ = (~(_c5_))&(_c6_);
{
factor = if_then_else(_c7_,1.f,factor);
d = if_then_else(_c7_,1.f,d);
}
_c6_ = (~(_c5_))&(~(_c6_));
auto _c8_ = (_c6_)&((d)>(0.f));
{
factor = if_then_else(_c8_,clamp(((offset)-((v_stop_offsets).x))/(d), 0.f, 1.f),factor);
}
return mix(v_color0, v_color1, factor);
}
ivec2 get_gpu_buffer_uv(I32 address) {
return make_ivec2((make_uint(address))%(1024u), (make_uint(address))/(1024u));
}
vec4 fetch_from_gpu_buffer_1f(I32 address) {
ivec2 uv = get_gpu_buffer_uv(address);
return texelFetch(sGpuBufferF, uv, 0);
}
Array<vec4,2> fetch_from_gpu_buffer_2f(I32 address) {
ivec2 uv = get_gpu_buffer_uv(address);
auto sGpuBufferF_uv_fetch = texelFetchPtr(sGpuBufferF, uv, 0, 1, 0, 0);
return Array<vec4,2>{{texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 0, 0), texelFetchUnchecked(sGpuBufferF, sGpuBufferF_uv_fetch, 1, 0)}};
}
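// General path: descends a 5-ary tree of stop offsets stored after the colors
// in the GPU buffer. Each level narrows the search to one of five partitions
// until it reaches the pair of stops bracketing the offset, whose colors are
// then fetched and interpolated.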
vec4 sample_gradient_stops_tree(Float offset) {
int32_t count = (v_gradient_header).y;
int32_t colors_addr = (v_gradient_header).w;
int32_t level_base_addr = (colors_addr)+(count);
int32_t level_stride = 1;
I32 offset_in_level = 0;
I32 index = 0;
int32_t index_stride = 1;
while (((index_stride)*(5))<=(count)) {
index_stride *= 5;
}
Float prev_offset = 1.f;
Float next_offset = 0.f;
vec4 current_stops = v_stop_offsets;
while ((index_stride)>(0)) {
I32 next_partition = 4;
auto _c20_ = ((current_stops).x)>(offset);
{
next_partition = if_then_else(_c20_,0,next_partition);
next_offset = if_then_else(_c20_,(current_stops).x,next_offset);
}
auto _c21_ = ((current_stops).y)>(offset);
auto _c22_ = (~(_c20_))&(_c21_);
{
next_partition = if_then_else(_c22_,1,next_partition);
prev_offset = if_then_else(_c22_,(current_stops).x,prev_offset);
next_offset = if_then_else(_c22_,(current_stops).y,next_offset);
}
_c21_ = (~(_c20_))&(~(_c21_));
auto _c23_ = ((current_stops).z)>(offset);
auto _c24_ = (_c21_)&(_c23_);
{
next_partition = if_then_else(_c24_,2,next_partition);
prev_offset = if_then_else(_c24_,(current_stops).y,prev_offset);
next_offset = if_then_else(_c24_,(current_stops).z,next_offset);
}
_c23_ = (_c21_)&(~(_c23_));
auto _c25_ = ((current_stops).w)>(offset);
auto _c26_ = (_c23_)&(_c25_);
{
next_partition = if_then_else(_c26_,3,next_partition);
prev_offset = if_then_else(_c26_,(current_stops).z,prev_offset);
next_offset = if_then_else(_c26_,(current_stops).w,next_offset);
}
_c25_ = (_c23_)&(~(_c25_));
{
prev_offset = if_then_else(_c25_,(current_stops).w,prev_offset);
}
index += (next_partition)*(index_stride);
if ((index_stride)==(1)) {
{
break;
}
}
index_stride /= 5;
level_base_addr += level_stride;
level_stride *= 5;
offset_in_level = ((offset_in_level)*(5))+(next_partition);
current_stops = fetch_from_gpu_buffer_1f((level_base_addr)+(offset_in_level));
}
Float d = (next_offset)-(prev_offset);
Float factor = 0.f;
auto _c27_ = (index)>=(count);
{
factor = if_then_else(_c27_,1.f,factor);
}
auto _c28_ = (~(_c27_))&((d)>(0.f));
{
factor = if_then_else(_c28_,clamp(((offset)-(prev_offset))/(d), 0.f, 1.f),factor);
}
auto _c29_ = (index)<(1);
{
index = if_then_else(_c29_,1,index);
}
auto _c30_ = (~(_c29_))&((index)>((count)-(1)));
{
index = if_then_else(_c30_,(count)-(1),index);
}
I32 color_pair_address = ((colors_addr)+(index))-(1);
Array<vec4,2> color_pair = fetch_from_gpu_buffer_2f(color_pair_address);
return mix(color_pair[0], color_pair[1], factor);
}
vec4 dither(vec4 color) {
return color;
}
vec4 pattern_fragment(vec4 color) {
Float offset = 0.f;
switch ((v_gradient_header).x) {
case 0:
{
offset = linear_gradient_fragment();
break;
}
case 1:
{
offset = radial_gradient_fragment();
break;
}
case 2:
{
offset = conic_gradient_fragment();
break;
}
default:
{
break;
}
}
offset = apply_extend_mode(offset);
int32_t stop_count = (v_gradient_header).y;
if ((stop_count)<=(2)) {
{
color *= sample_gradient_stops_fast(offset);
}
} else {
color *= sample_gradient_stops_tree(offset);
}
return dither(color);
}
ALWAYS_INLINE void main() {
vec4_scalar base_color = v_color;
base_color *= antialiasing_fragment();
vec4 output_color = pattern_fragment(base_color);
if (((v_flags).z)!=(0)) {
{
output_color = (output_color).sel(R,R,R,R);
}
}
oFragColor = output_color;
}
ivec2_scalar get_gpu_buffer_uv(int32_t address) {
return make_ivec2((make_uint(address))%(1024u), (make_uint(address))/(1024u));
}
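// SWGL span fast path, taken only for radial gradients (kind 1). It validates
// the stop color and offset tables and commits the whole span with
// swgl_commitRadialGradientFromStopsRGBA8, emitting solid magenta if
// validation fails.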
void swgl_drawSpanRGBA8() {
if (((v_gradient_header).x)!=(1)) {
{
return;
}
}
int32_t stop_count = (v_gradient_header).y;
int32_t colors_address = (v_gradient_header).w;
int32_t colors_addr = swgl_validateGradient(sGpuBufferF, get_gpu_buffer_uv(colors_address), stop_count);
int32_t offsets_addr = swgl_validateGradient(sGpuBufferF, get_gpu_buffer_uv((colors_address)+(stop_count)), stop_count);
if (((offsets_addr)<(0))||((colors_addr)<(0))) {
{
swgl_commitSolidRGBA8(make_vec4(1.f, 0.f, 1.f, 1.f));
return;
}
}
vec2 pos = (v_interpolated_data).sel(X,Y);
float start_radius = (v_flat_data).x;
bool repeat = ((v_gradient_header).z)!=(0.f);
swgl_commitRadialGradientFromStopsRGBA8(sGpuBufferF, offsets_addr, colors_addr, stop_count, repeat, pos, start_radius);
}
typedef ps_quad_gradient_vert::InterpOutputs InterpInputs;
InterpInputs interp_step;
struct InterpPerspective {
vec4 v_interpolated_data;
};
InterpPerspective interp_perspective;
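// Interpolant stepping: v_interpolated_data advances four fragments per step;
// the perspective variants rescale the interpolant by 1 / gl_FragCoord.w on
// each step.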
static void read_interp_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_;
self->v_interpolated_data = init_interp(init->v_interpolated_data, step->v_interpolated_data);
self->interp_step.v_interpolated_data = step->v_interpolated_data * 4.0f;
}
static void read_perspective_inputs(FragmentShaderImpl* impl, const void* init_, const void* step_) {Self* self = (Self*)impl;const InterpInputs* init = (const InterpInputs*)init_;const InterpInputs* step = (const InterpInputs*)step_;
Float w = 1.0f / self->gl_FragCoord.w;
self->interp_perspective.v_interpolated_data = init_interp(init->v_interpolated_data, step->v_interpolated_data);
self->v_interpolated_data = self->interp_perspective.v_interpolated_data * w;
self->interp_step.v_interpolated_data = step->v_interpolated_data * 4.0f;
}
ALWAYS_INLINE void step_interp_inputs(int steps = 4) {
float chunks = steps * 0.25f;
v_interpolated_data += interp_step.v_interpolated_data * chunks;
}
ALWAYS_INLINE void step_perspective_inputs(int steps = 4) {
step_perspective(steps);
float chunks = steps * 0.25f;
Float w = 1.0f / gl_FragCoord.w;
interp_perspective.v_interpolated_data += interp_step.v_interpolated_data * chunks;
v_interpolated_data = w * interp_perspective.v_interpolated_data;
}
static void run(FragmentShaderImpl* impl) {
Self* self = (Self*)impl;
self->main();
self->step_interp_inputs();
}
static void skip(FragmentShaderImpl* impl, int steps) {
Self* self = (Self*)impl;
self->step_interp_inputs(steps);
}
static void run_perspective(FragmentShaderImpl* impl) {
Self* self = (Self*)impl;
self->main();
self->step_perspective_inputs();
}
static void skip_perspective(FragmentShaderImpl* impl, int steps) {
Self* self = (Self*)impl;
self->step_perspective_inputs(steps);
}
static int draw_span_RGBA8(FragmentShaderImpl* impl) {
Self* self = (Self*)impl; DISPATCH_DRAW_SPAN(self, RGBA8); }
public:
ps_quad_gradient_frag() {
init_span_func = &read_interp_inputs;
run_func = &run;
skip_func = &skip;
draw_span_RGBA8_func = &draw_span_RGBA8;
enable_perspective();
init_span_w_func = &read_perspective_inputs;
run_w_func = &run_perspective;
skip_w_func = &skip_perspective;
}
};
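// Program wrapper: maps uniform and attribute names to the integer indices
// used by the uniform setters and attribute loaders above.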
struct ps_quad_gradient_program : ProgramImpl, ps_quad_gradient_frag {
int get_uniform(const char *name) const override {
if (strcmp("sGpuBufferF", name) == 0) { return 3; }
if (strcmp("sGpuBufferI", name) == 0) { return 4; }
if (strcmp("sRenderTasks", name) == 0) { return 2; }
if (strcmp("sTransformPalette", name) == 0) { return 1; }
if (strcmp("uTransform", name) == 0) { return 5; }
return -1;
}
void bind_attrib(const char* name, int index) override {
attrib_locations.bind_loc(name, index);
}
int get_attrib(const char* name) const override {
return attrib_locations.get_loc(name);
}
size_t interpolants_size() const override { return sizeof(InterpOutputs); }
VertexShaderImpl* get_vertex_shader() override {
return this;
}
FragmentShaderImpl* get_fragment_shader() override {
return this;
}
const char* get_name() const override { return "ps_quad_gradient"; }
static ProgramImpl* loader() { return new ps_quad_gradient_program; }
};