Revision: 1098
Author:
jo...@lunarg.com
Date: Fri Jul 24 20:27:07 2015 UTC
Log: GLSL -> Top IR -> LunarGOO: Add A) atomic_uint type, B)
atomic-counter built-ins, and C) atomic built-ins (non image).
https://code.google.com/p/lunarglass/source/detail?r=1098
Added:
/trunk/test/atomic.comp
/trunk/test/baseResults/atomic.comp.out
Modified:
/trunk/Backends/GLSL/BottomToGLSL.cpp
/trunk/Core/LLVM/llvm-3.4/include/llvm/IR/IntrinsicsLunarGLASSTop.td
/trunk/Core/TopBuilder.cpp
/trunk/Core/metadata.h
/trunk/Frontends/glslang/GlslangToTopVisitor.cpp
/trunk/test/runtests
=======================================
--- /dev/null
+++ /trunk/test/atomic.comp Fri Jul 24 20:27:07 2015 UTC
@@ -0,0 +1,39 @@
+#version 310 es
+
+layout(binding = 0) uniform atomic_uint counter;
+layout(binding = 0, offset = 4) uniform atomic_uint countArr[4];
+
+uniform uint value;
+
+int arrX[gl_WorkGroupSize.x];
+int arrY[gl_WorkGroupSize.y];
+int arrZ[gl_WorkGroupSize.z];
+
+uint func(atomic_uint c)
+{
+ return atomicCounterIncrement(c);
+}
+
+shared int atomi;
+shared uint atomu;
+
+void atoms()
+{
+ int origi = atomicAdd(atomi, 3);
+ uint origu = atomicAnd(atomu, value);
+ origu = atomicOr(atomu, 7u);
+ origu = atomicXor(atomu, 7u);
+ origu = atomicMin(atomu, value);
+ origi = atomicMax(atomi, 7);
+ origi = atomicExchange(atomi, origi);
+ origu = atomicCompSwap(atomu, 10u, value);
+}
+
+void main()
+{
+ memoryBarrierAtomicCounter();
+ atoms();
+ func(counter);
+ uint val = atomicCounter(countArr[2]);
+ atomicCounterDecrement(counter);
+}
=======================================
--- /dev/null
+++ /trunk/test/baseResults/atomic.comp.out Fri Jul 24 20:27:07 2015 UTC
@@ -0,0 +1,268 @@
+
+Top IR:
+; ModuleID = 'Glslang'
+
+@atomi = global i32 0
+@atomu = global i32 0
+@value = external addrspace(2) constant i32
+@counter = external addrspace(2) constant i32
+@countArr = external addrspace(2) constant [4 x i32]
+@arrX = internal global [1 x i32] zeroinitializer
+@arrY = internal global [1 x i32] zeroinitializer
+@arrZ = internal global [1 x i32] zeroinitializer
+
+define fastcc void @main() {
+entry:
+ %val = alloca i32
+ %param = alloca i32
+ br label %mainBody
+
+mainBody: ; preds = %entry
+ call void @llvm.gla.memoryBarrierAtomicCounter()
+ call void @"atoms("()
+ %param1 = load i32 addrspace(2)* @counter, !gla.uniform !4
+ store i32 %param1, i32* %param
+ %0 = call i32 @"func(au1;"(i32* %param)
+ %1 = load i32 addrspace(2)* getelementptr inbounds ([4 x i32]
addrspace(2)* @countArr, i32 0, i32 2), !gla.uniform !6
+ %val2 = call i32 @llvm.gla.atomicCounterLoad(i32 %1), !gla.precision !8
+ store i32 %val2, i32* %val
+ %2 = load i32 addrspace(2)* @counter, !gla.uniform !4
+ %3 = call i32 @llvm.gla.atomicCounterDecrement(i32 %2), !gla.precision !8
+ br label %stage-epilogue
+
+stage-epilogue: ; preds = %mainBody
+ br label %stage-exit
+
+stage-exit: ; preds = %stage-epilogue
+ ret void
+}
+
+; Function Attrs: alwaysinline
+define internal fastcc i32 @"func(au1;"(i32*) #0 {
+entry:
+ %1 = load i32* %0
+ %2 = call i32 @llvm.gla.atomicCounterIncrement(i32 %1), !gla.precision !8
+ ret i32 %2
+
+post-return: ; No predecessors!
+ unreachable
+}
+
+; Function Attrs: alwaysinline
+define internal fastcc void @"atoms("() #0 {
+entry:
+ %origu = alloca i32
+ %origi = alloca i32
+ %0 = load i32* @atomi
+ %origi1 = call i32 @llvm.gla.atomicAdd(i32 %0, i32 3), !gla.precision !8
+ store i32 %origi1, i32* %origi
+ %1 = load i32* @atomu
+ %2 = load i32 addrspace(2)* @value, !gla.uniform !2
+ %origu2 = call i32 @llvm.gla.atomicAnd(i32 %1, i32 %2), !gla.precision !8
+ store i32 %origu2, i32* %origu
+ %3 = load i32* @atomu
+ %origu3 = call i32 @llvm.gla.atomicOr(i32 %3, i32 7), !gla.precision !8
+ store i32 %origu3, i32* %origu
+ %4 = load i32* @atomu
+ %origu4 = call i32 @llvm.gla.atomicXor(i32 %4, i32 7), !gla.precision !8
+ store i32 %origu4, i32* %origu
+ %5 = load i32* @atomu
+ %6 = load i32 addrspace(2)* @value, !gla.uniform !2
+ %origu5 = call i32 @llvm.gla.atomicMin(i32 %5, i32 %6), !gla.precision !8
+ store i32 %origu5, i32* %origu
+ %7 = load i32* @atomi
+ %origi6 = call i32 @llvm.gla.atomicMax(i32 %7, i32 7), !gla.precision !8
+ store i32 %origi6, i32* %origi
+ %8 = load i32* @atomi
+ %9 = load i32* %origi
+ %origi7 = call i32 @llvm.gla.atomicExchange(i32 %8,
i32 %9), !gla.precision !8
+ store i32 %origi7, i32* %origi
+ %10 = load i32* @atomu
+ %11 = load i32 addrspace(2)* @value, !gla.uniform !2
+ %origu8 = call i32 @llvm.gla.atomicCompExchange(i32 %10, i32 10,
i32 %11), !gla.precision !8
+ store i32 %origu8, i32* %origu
+ ret void
+}
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCounterIncrement(i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicAdd(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicAnd(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicOr(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicXor(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicMin(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicMax(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicExchange(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCompExchange(i32, i32, i32) #1
+
+; Function Attrs: nounwind
+declare void @llvm.gla.memoryBarrierAtomicCounter() #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCounterLoad(i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCounterDecrement(i32) #1
+
+attributes #0 = { alwaysinline }
+attributes #1 = { nounwind }
+
+!gla.shared = !{!0, !1}
+!gla.uniforms = !{!2, !4, !6}
+!gla.entrypoint = !{!7}
+
+!0 = metadata !{i32* @atomi}
+!1 = metadata !{i32* @atomu}
+!2 = metadata !{metadata !"value", i32 12, i32* @value_typeProxy,
metadata !3}
+!3 = metadata !{i32 1, i32 3, i32 1024, null}
+!4 = metadata !{metadata !"counter", i32 12, i32* @counter_typeProxy,
metadata !5}
+!5 = metadata !{i32 10, i32 3, i32 0, null}
+!6 = metadata !{metadata !"countArr", i32 12, [4 x i32]*
@countArr_typeProxy, metadata !5}
+!7 = metadata !{metadata !"main", i32 15}
+!8 = metadata !{i32 3}
+
+
+Bottom IR:
+; ModuleID = 'Glslang'
+
+@atomi = global i32 0
+@atomu = global i32 0
+@value = external addrspace(2) constant i32
+@counter = external addrspace(2) constant i32
+@countArr = external addrspace(2) constant [4 x i32]
+
+define fastcc void @main() {
+entry:
+ call void @llvm.gla.memoryBarrierAtomicCounter()
+ %0 = load i32* @atomi, align 4
+ %origi1.i = call i32 @llvm.gla.atomicAdd(i32 %0, i32
3), !gla.precision !8
+ %1 = load i32* @atomu, align 4
+ %2 = load i32 addrspace(2)* @value, align 4, !gla.uniform !2
+ %origu2.i = call i32 @llvm.gla.atomicAnd(i32 %1,
i32 %2), !gla.precision !8
+ %origu3.i = call i32 @llvm.gla.atomicOr(i32 %1, i32 7), !gla.precision !8
+ %origu4.i = call i32 @llvm.gla.atomicXor(i32 %1, i32
7), !gla.precision !8
+ %origu5.i = call i32 @llvm.gla.atomicMin(i32 %1,
i32 %2), !gla.precision !8
+ %origi6.i = call i32 @llvm.gla.atomicMax(i32 %0, i32
7), !gla.precision !8
+ %origi7.i = call i32 @llvm.gla.atomicExchange(i32 %0,
i32 %origi6.i), !gla.precision !8
+ %origu8.i = call i32 @llvm.gla.atomicCompExchange(i32 %1, i32 10,
i32 %2), !gla.precision !8
+ %param1 = load i32 addrspace(2)* @counter, align 4, !gla.uniform !4
+ %3 = call i32
@llvm.gla.atomicCounterIncrement(i32 %param1), !gla.precision !8
+ %gla_constGEP = getelementptr [4 x i32] addrspace(2)* @countArr, i32 0,
i32 2
+ %4 = load i32 addrspace(2)* %gla_constGEP, align 4, !gla.uniform !6
+ %val2 = call i32 @llvm.gla.atomicCounterLoad(i32 %4), !gla.precision !8
+ %5 = call i32
@llvm.gla.atomicCounterDecrement(i32 %param1), !gla.precision !8
+ br label %stage-epilogue
+
+stage-epilogue: ; preds = %entry
+ br label %stage-exit
+
+stage-exit: ; preds = %stage-epilogue
+ ret void
+}
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCounterIncrement(i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicAdd(i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicAnd(i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicOr(i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicXor(i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicMin(i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicMax(i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicExchange(i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCompExchange(i32, i32, i32) #0
+
+; Function Attrs: nounwind
+declare void @llvm.gla.memoryBarrierAtomicCounter() #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCounterLoad(i32) #0
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.atomicCounterDecrement(i32) #0
+
+attributes #0 = { nounwind }
+
+!gla.shared = !{!0, !1}
+!gla.uniforms = !{!2, !4, !6}
+!gla.entrypoint = !{!7}
+
+!0 = metadata !{i32* @atomi}
+!1 = metadata !{i32* @atomu}
+!2 = metadata !{metadata !"value", i32 12, i32* @value_typeProxy,
metadata !3}
+!3 = metadata !{i32 1, i32 3, i32 1024, null}
+!4 = metadata !{metadata !"counter", i32 12, i32* @counter_typeProxy,
metadata !5}
+!5 = metadata !{i32 10, i32 3, i32 0, null}
+!6 = metadata !{metadata !"countArr", i32 12, [4 x i32]*
@countArr_typeProxy, metadata !5}
+!7 = metadata !{metadata !"main", i32 15}
+!8 = metadata !{i32 3}
+#version 310 es
+// LunarGOO output
+uniform highp uint value;
+layout(location=0) uniform highp atomic_uint counter;
+layout(location=0) uniform highp atomic_uint countArr[4];
+shared int atomi;
+shared int atomu;
+const int C_3 = 3;
+const int C_7 = 7;
+const int C_10 = 10;
+
+void main()
+{
+ memoryBarrierAtomicCounter();
+ highp int origi = atomicAdd(atomi, C_3);
+ highp int origu = atomicAnd(atomu, int(value));
+ highp int origu1 = atomicOr(atomu, C_7);
+ highp int origu2 = atomicXor(atomu, C_7);
+ highp int origu3 = atomicMin(atomu, int(value));
+ highp int origi1 = atomicMax(atomi, C_7);
+ highp int origi2 = atomicExchange(atomi, origi1);
+ highp int origu4 = atomicCompSwap(atomu, C_10, int(value));
+ highp int H_wxjx121 = atomicCounterIncrement(counter);
+ highp int val = atomicCounter(countArr[2]);
+ highp int H_w2ftqr = atomicCounterDecrement(counter);
+
+}
+
+tempglsl.comp
+Warning, version 310 is not yet complete; most version-specific features
are present, but some are missing.
+ERROR: 0:4: 'atomic_uint' : layout(binding=X) is required
+ERROR: 0:5: 'location' : overlapping use of location 0
+ERROR: 0:5: 'atomic_uint' : layout(binding=X) is required
+ERROR: 0:23: '=' : cannot convert from 'global highp uint' to 'temp highp
int'
+ERROR: 0:24: '=' : cannot convert from 'global highp uint' to 'temp highp
int'
+ERROR: 0:25: '=' : cannot convert from 'global highp uint' to 'temp highp
int'
+ERROR: 6 compilation errors. No code generated.
+
+
=======================================
--- /trunk/Backends/GLSL/BottomToGLSL.cpp Sun May 31 21:29:58 2015 UTC
+++ /trunk/Backends/GLSL/BottomToGLSL.cpp Fri Jul 24 20:27:07 2015 UTC
@@ -329,7 +329,9 @@
class MetaType {
public:
- MetaType() : precision(gla::EMpNone), builtIn(gla::EmbNone),
matrix(false), notSigned(false), block(false), buffer(false),
runtimeArrayed(false), mdAggregate(0), mdSampler(0) { }
+ MetaType() : precision(gla::EMpNone), builtIn(gla::EmbNone),
matrix(false), notSigned(false),
+ block(false), buffer(false), runtimeArrayed(false),
mdAggregate(0), mdSampler(0),
+ atomic(false) { }
std::string name;
gla::EMdPrecision precision;
gla::EMdBuiltIn builtIn;
@@ -340,6 +342,7 @@
bool runtimeArrayed;
const llvm::MDNode* mdAggregate;
const llvm::MDNode* mdSampler;
+ bool atomic;
};
class Assignment;
@@ -3531,6 +3534,20 @@
case llvm::Intrinsic::gla_emitStreamVertex: callString
= "EmitStreamVertex"; break;
 case llvm::Intrinsic::gla_endStreamPrimitive: callString
= "EndStreamPrimitive"; break; // NOTE(review): was "EmitStreamVertex" — copy-paste bug; GLSL built-in is EndStreamPrimitive()
+ // Atomics
+ case llvm::Intrinsic::gla_atomicCounterLoad: callString
= "atomicCounter"; break;
+ case llvm::Intrinsic::gla_atomicCounterIncrement: callString
= "atomicCounterIncrement"; break;
+ case llvm::Intrinsic::gla_atomicCounterDecrement: callString
= "atomicCounterDecrement"; break;
+
+ case llvm::Intrinsic::gla_atomicAdd: callString
= "atomicAdd"; break;
+ case llvm::Intrinsic::gla_atomicMin: callString
= "atomicMin"; break;
+ case llvm::Intrinsic::gla_atomicMax: callString
= "atomicMax"; break;
+ case llvm::Intrinsic::gla_atomicAnd: callString
= "atomicAnd"; break;
+ case llvm::Intrinsic::gla_atomicOr: callString
= "atomicOr"; break;
+ case llvm::Intrinsic::gla_atomicXor: callString
= "atomicXor"; break;
+ case llvm::Intrinsic::gla_atomicExchange: callString
= "atomicExchange"; break;
+ case llvm::Intrinsic::gla_atomicCompExchange: callString
= "atomicCompSwap"; break;
+
default: break;
}
@@ -3931,7 +3948,9 @@
else if (type == type->getInt1Ty(type->getContext()))
out << "bool";
else if (type == type->getInt32Ty(type->getContext())) {
- if (metaType.notSigned)
+ if (metaType.atomic)
+ out << "atomic_uint";
+ else if (metaType.notSigned)
out << "uint";
else
out << "int";
@@ -4005,6 +4024,7 @@
metaType.matrix = typeLayout == EMtlRowMajorMatrix || typeLayout ==
EMtlColMajorMatrix;
metaType.notSigned = typeLayout == EMtlUnsigned;
+ metaType.atomic = typeLayout == EMtlAtomicUint;
if (! arrayChild)
emitGlaLayout(out, typeLayout, location, metaType.block ||
metaType.mdSampler != 0);
=======================================
--- /trunk/Core/LLVM/llvm-3.4/include/llvm/IR/IntrinsicsLunarGLASSTop.td
Mon May 25 23:01:59 2015 UTC
+++ /trunk/Core/LLVM/llvm-3.4/include/llvm/IR/IntrinsicsLunarGLASSTop.td
Fri Jul 24 20:27:07 2015 UTC
@@ -281,3 +281,19 @@
def int_gla_memoryBarrierImage : Intrinsic<[], [], []>;
def int_gla_groupMemoryBarrier : Intrinsic<[], [], []>;
}
+
+// Atomics
+let TargetPrefix = "gla" in {
+ def int_gla_atomicCounterLoad : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicCounterIncrement : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicCounterDecrement : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty], [IntrReadWriteArgMem]>;
+
+ def int_gla_atomicAdd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicMin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicMax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicAnd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicOr : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicXor : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicExchange : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_atomicCompExchange : Intrinsic<[llvm_i32_ty], [llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+}
=======================================
--- /trunk/Core/TopBuilder.cpp Sat Jul 18 17:34:26 2015 UTC
+++ /trunk/Core/TopBuilder.cpp Fri Jul 24 20:27:07 2015 UTC
@@ -2260,6 +2260,14 @@
// modf() will return a struct that the caller must decode
intrinsicName = getIntrinsic(intrinsicID, operand->getType(),
operand->getType(), operand->getType());
break;
+
+ // Atomics don't have flexible types
+ case llvm::Intrinsic::gla_atomicCounterLoad:
+ case llvm::Intrinsic::gla_atomicCounterIncrement:
+ case llvm::Intrinsic::gla_atomicCounterDecrement:
+ intrinsicName = getIntrinsic(intrinsicID);
+ break;
+
default:
// Unary intrinsics that have operand and dest with same flexible
type
intrinsicName = getIntrinsic(intrinsicID, operand->getType(),
operand->getType());
@@ -2301,6 +2309,18 @@
// first argument can be scalar, return and second argument match
intrinsicName = getIntrinsic(intrinsicID, operand1->getType(),
operand0->getType(), operand1->getType());
break;
+
+ // atomics don't have any flexible arguments
+ case llvm::Intrinsic::gla_atomicAdd:
+ case llvm::Intrinsic::gla_atomicMin:
+ case llvm::Intrinsic::gla_atomicMax:
+ case llvm::Intrinsic::gla_atomicAnd:
+ case llvm::Intrinsic::gla_atomicOr:
+ case llvm::Intrinsic::gla_atomicXor:
+ case llvm::Intrinsic::gla_atomicExchange:
+ intrinsicName = getIntrinsic(intrinsicID);
+ break;
+
default:
// Binary intrinsics that have operand and dest with same flexible
type
intrinsicName = getIntrinsic(intrinsicID, operand0->getType(),
operand0->getType(), operand1->getType());
@@ -2330,6 +2350,12 @@
// first argument can be scalar, return and second argument match
intrinsicName = getIntrinsic(intrinsicID, operand2->getType(),
operand0->getType(), operand1->getType(), operand2->getType());
break;
+
+ // atomics don't have any flexible arguments
+ case llvm::Intrinsic::gla_atomicCompExchange:
+ intrinsicName = getIntrinsic(intrinsicID);
+ break;
+
default:
// Use operand0 type as result type
intrinsicName = getIntrinsic(intrinsicID, operand0->getType(),
operand0->getType(), operand1->getType(), operand2->getType());
=======================================
--- /trunk/Core/metadata.h Tue Jul 7 07:02:50 2015 UTC
+++ /trunk/Core/metadata.h Fri Jul 24 20:27:07 2015 UTC
@@ -216,6 +216,10 @@
EMtlStd140,
EMtlStd430,
EMtlPacked,
+
+ // Atomic counter
+ EMtlAtomicUint,
+
EMtlCount,
};
=======================================
--- /trunk/Frontends/glslang/GlslangToTopVisitor.cpp Thu Jul 9 19:01:49
2015 UTC
+++ /trunk/Frontends/glslang/GlslangToTopVisitor.cpp Fri Jul 24 20:27:07
2015 UTC
@@ -228,10 +228,11 @@
}
} else {
switch (type.getBasicType()) {
- default: mdType = gla::EMtlNone; break;
- case glslang::EbtSampler: mdType = gla::EMtlSampler; break;
- case glslang::EbtStruct: mdType = gla::EMtlAggregate; break;
- case glslang::EbtUint: mdType = gla::EMtlUnsigned; break;
+ default: mdType = gla::EMtlNone; break;
+ case glslang::EbtSampler: mdType = gla::EMtlSampler; break;
+ case glslang::EbtStruct: mdType = gla::EMtlAggregate; break;
+ case glslang::EbtUint: mdType = gla::EMtlUnsigned; break;
+ case glslang::EbtAtomicUint: mdType = gla::EMtlAtomicUint; break;
case glslang::EbtBlock:
switch (type.getQualifier().storage) {
case glslang::EvqUniform:
@@ -1465,6 +1466,7 @@
glaType = gla::GetBoolType(context);
break;
case glslang::EbtInt:
+ case glslang::EbtAtomicUint:
case glslang::EbtSampler:
glaType = gla::GetIntType(context);
break;
@@ -2406,6 +2408,16 @@
intrinsicID = llvm::Intrinsic::gla_endStreamPrimitive;
break;
+ case glslang::EOpAtomicCounterIncrement:
+ intrinsicID = llvm::Intrinsic::gla_atomicCounterIncrement;
+ break;
+ case glslang::EOpAtomicCounterDecrement:
+ intrinsicID = llvm::Intrinsic::gla_atomicCounterDecrement;
+ break;
+ case glslang::EOpAtomicCounter:
+ intrinsicID = llvm::Intrinsic::gla_atomicCounterLoad;
+ break;
+
default:
break;
}
@@ -2501,6 +2513,32 @@
case glslang::EOpRefract:
intrinsicID = llvm::Intrinsic::gla_fRefract;
break;
+
+ case glslang::EOpAtomicAdd:
+ intrinsicID = llvm::Intrinsic::gla_atomicAdd;
+ break;
+ case glslang::EOpAtomicMin:
+ intrinsicID = llvm::Intrinsic::gla_atomicMin;
+ break;
+ case glslang::EOpAtomicMax:
+ intrinsicID = llvm::Intrinsic::gla_atomicMax;
+ break;
+ case glslang::EOpAtomicAnd:
+ intrinsicID = llvm::Intrinsic::gla_atomicAnd;
+ break;
+ case glslang::EOpAtomicOr:
+ intrinsicID = llvm::Intrinsic::gla_atomicOr;
+ break;
+ case glslang::EOpAtomicXor:
+ intrinsicID = llvm::Intrinsic::gla_atomicXor;
+ break;
+ case glslang::EOpAtomicExchange:
+ intrinsicID = llvm::Intrinsic::gla_atomicExchange;
+ break;
+ case glslang::EOpAtomicCompSwap:
+ intrinsicID = llvm::Intrinsic::gla_atomicCompExchange;
+ break;
+
default:
break;
}
=======================================
--- /trunk/test/runtests Sun May 31 21:29:58 2015 UTC
+++ /trunk/test/runtests Fri Jul 24 20:27:07 2015 UTC
@@ -59,6 +59,7 @@
doWhileLoop.frag \
loops.frag \
forLoop.frag \
+ atomic.comp \
130.frag \
140.frag \
150.vert \