Revision: 1110
Author: jo...@lunarg.com
Date: Thu Aug 20 20:53:17 2015 UTC
Log: Full stack: Change design of image ops to support 1:1 mapping
between GLSL built-ins and LLVM intrinsics. This changes the set of
intrinsics and what operands they take (texFlags for image-op is no longer
needed).
https://code.google.com/p/lunarglass/source/detail?r=1110
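For illustration, abridged from the updated aep.vert.out baseline below: a GLSL
imageAtomicAdd() call used to lower to the single generic gla_imageOp intrinsic,
with the particular operation packed into the texFlags operand (i32 196608 here);
it now lowers to its own dedicated intrinsic, and the flags operand disappears:

    before:  %image = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %0, i32 196608, <2 x i32> %1, i32 %3)
    after:   %4 = call i32 @llvm.gla.imageAtomicAdd.v2i32(i32 2, i32 %0, <2 x i32> %1, i32 %3)

Correspondingly, Builder::createImageCall() now takes an explicit EImageOp argument
rather than an image op encoded into texFlags (see the TopBuilder.h change below).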
Modified:
/trunk/Backends/GLSL/BottomToGLSL.cpp
/trunk/Core/LLVM/llvm-3.4/include/llvm/IR/IntrinsicsLunarGLASSTop.td
/trunk/Core/LunarGLASSTopIR.h
/trunk/Core/TopBuilder.cpp
/trunk/Core/TopBuilder.h
/trunk/Frontends/glslang/GlslangToTopVisitor.cpp
/trunk/test/baseResults/310.comp.out
/trunk/test/baseResults/aep.vert.out
=======================================
--- /trunk/Backends/GLSL/BottomToGLSL.cpp Thu Aug 20 01:05:24 2015 UTC
+++ /trunk/Backends/GLSL/BottomToGLSL.cpp Thu Aug 20 20:53:17 2015 UTC
@@ -2992,7 +2992,6 @@
// Handle texturing
bool gather = false;
bool refZemitted = false;
- bool load = false;
switch (llvmInstruction->getIntrinsicID()) {
case llvm::Intrinsic::gla_queryTextureSize:
case llvm::Intrinsic::gla_queryTextureSizeNoLod:
@@ -3032,22 +3031,28 @@
case llvm::Intrinsic::gla_imageLoad:
case llvm::Intrinsic::gla_fImageLoad:
- load = true;
- // fall through
case llvm::Intrinsic::gla_imageStoreI:
case llvm::Intrinsic::gla_imageStoreF:
- case llvm::Intrinsic::gla_imageOp:
+ case llvm::Intrinsic::gla_imageAtomicAdd:
+ case llvm::Intrinsic::gla_imageAtomicMin:
+ case llvm::Intrinsic::gla_imageAtomicMax:
+ case llvm::Intrinsic::gla_imageAtomicAnd:
+ case llvm::Intrinsic::gla_imageAtomicOr:
+ case llvm::Intrinsic::gla_imageAtomicXor:
+ case llvm::Intrinsic::gla_iImageAtomicExchange:
+ case llvm::Intrinsic::gla_fImageAtomicExchange:
+ case llvm::Intrinsic::gla_imageAtomicCompExchange:
{
bool needConversion = samplerIsUint(llvmInstruction->getOperand(GetTextureOpIndex(ETOSamplerLoc)));
if (needConversion)
ConversionStart(assignment, llvmInstruction->getType(), false);
- emitGlaSamplerFunction(assignment, llvmInstruction, GetConstantInt(llvmInstruction->getOperand(GetTextureOpIndex(ETOFlag))));
+ emitGlaSamplerFunction(assignment, llvmInstruction, 0);
assignment << "(";
- emitGlaOperand(assignment, llvmInstruction->getOperand(GetTextureOpIndex(ETOSamplerLoc)));
+ emitGlaOperand(assignment, llvmInstruction->getOperand(1));
assignment << ", ";
- emitGlaOperand(assignment, llvmInstruction->getOperand(GetTextureOpIndex(ETOCoord)));
+ emitGlaOperand(assignment, llvmInstruction->getOperand(2));
- for (int op = GetTextureOpIndex(ETOCoord) + 1; op < (int)llvmInstruction->getNumOperands() - 1; ++op) {
+ for (int op = 3; op < (int)llvmInstruction->getNumOperands() - 1; ++op) {
assignment << ", ";
emitGlaOperand(assignment, llvmInstruction->getOperand(op));
}
@@ -3694,26 +3699,23 @@
// TODO: uint functionality: See if it's a uint sampler, requiring a constructor to convert it
- int imageOp = (texFlags & ETFImageOp) >> ImageOpShift;
- if (imageOp) {
- switch (imageOp) {
- case EImageLoad: out << "imageLoad"; break;
- case EImageStore: out << "imageStore"; break;
- case EImageAtomicAdd: out << "imageAtomicAdd"; break;
- case EImageAtomicMin: out << "imageAtomicMin"; break;
- case EImageAtomicMax: out << "imageAtomicMax"; break;
- case EImageAtomicAnd: out << "imageAtomicAnd"; break;
- case EImageAtomicOr: out << "imageAtomicOr"; break;
- case EImageAtomicXor: out << "imageAtomicXor"; break;
- case EImageAtomicExchange: out << "imageAtomicExchange"; break;
- case EImageAtomicCompSwap: out << "imageAtomicCompSwap"; break;
- default:
- UnsupportedFunctionality("image op");
- break;
- }
-
- return;
- }
+ switch (llvmInstruction->getIntrinsicID()) {
+ case llvm::Intrinsic::gla_imageLoad:
+ case llvm::Intrinsic::gla_fImageLoad: out << "imageLoad"; return;
+ case llvm::Intrinsic::gla_imageStoreI:
+ case llvm::Intrinsic::gla_imageStoreF: out << "imageStore"; return;
+ case llvm::Intrinsic::gla_imageAtomicAdd: out << "imageAtomicAdd"; return;
+ case llvm::Intrinsic::gla_imageAtomicMin: out << "imageAtomicMin"; return;
+ case llvm::Intrinsic::gla_imageAtomicMax: out << "imageAtomicMax"; return;
+ case llvm::Intrinsic::gla_imageAtomicAnd: out << "imageAtomicAnd"; return;
+ case llvm::Intrinsic::gla_imageAtomicOr: out << "imageAtomicOr"; return;
+ case llvm::Intrinsic::gla_imageAtomicXor: out << "imageAtomicXor"; return;
+ case llvm::Intrinsic::gla_iImageAtomicExchange:
+ case llvm::Intrinsic::gla_fImageAtomicExchange: out << "imageAtomicExchange"; return;
+ case llvm::Intrinsic::gla_imageAtomicCompExchange: out << "imageAtomicCompSwap"; return;
+ default:
+ break;
+ }
// Original style shadowing returns vec4 while 2nd generation returns float,
// so, have to stick to old-style for those cases.
=======================================
--- /trunk/Core/LLVM/llvm-3.4/include/llvm/IR/IntrinsicsLunarGLASSTop.td Thu Aug 20 01:05:24 2015 UTC
+++ /trunk/Core/LLVM/llvm-3.4/include/llvm/IR/IntrinsicsLunarGLASSTop.td Thu Aug 20 20:53:17 2015 UTC
@@ -215,12 +215,21 @@
// Images
let TargetPrefix = "gla" in {
- def int_gla_imageLoad : Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty], [IntrReadArgMem]>;
- def int_gla_fImageLoad: Intrinsic<[llvm_anyfloat_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty], [IntrReadArgMem]>;
- def int_gla_imageStoreI: Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_v4i32_ty], [IntrReadWriteArgMem]>;
- def int_gla_imageStoreF: Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
- def int_gla_imageOp: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
- def int_gla_queryImageSize: Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_gla_imageLoad : Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty], [IntrReadArgMem]>;
+ def int_gla_fImageLoad: Intrinsic<[llvm_anyfloat_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty], [IntrReadArgMem]>;
+ def int_gla_imageStoreI: Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_v4i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageStoreF: Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageAtomicAdd: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageAtomicMin: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageAtomicMax: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageAtomicAnd: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageAtomicOr: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageAtomicXor: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_iImageAtomicExchange: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_fImageAtomicExchange: Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_float_ty], [IntrReadWriteArgMem]>;
+ def int_gla_imageAtomicCompExchange: Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_gla_queryImageSize: Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_gla_queryImageSamples: Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
}
// Geometry
=======================================
--- /trunk/Core/LunarGLASSTopIR.h Thu Aug 20 01:05:24 2015 UTC
+++ /trunk/Core/LunarGLASSTopIR.h Thu Aug 20 20:53:17 2015 UTC
@@ -89,10 +89,7 @@
ETFRefZArg = 0x00000800,
ETFProjectedArg = 0x00001000,
ETFOffsets = 0x00002000, // means offset argument is an array that needs to be broken up
- //ETF = 0x00008000, // placeholder for future growth
- ETFImageOp = 0x000F0000, // wide slot to hold an EImageOp value, see ImageOpShift below and EImageOp above
};
- static const int ImageOpShift = 16;
// Texture op, for mapping operands
enum ETextureOperand {
=======================================
--- /trunk/Core/TopBuilder.cpp Thu Aug 20 01:05:24 2015 UTC
+++ /trunk/Core/TopBuilder.cpp Thu Aug 20 20:53:17 2015 UTC
@@ -1714,8 +1714,7 @@
return createSwizzle(precision, scalar, 0x00, vectorType);
}
-// Accept all parameters needed to create LunarGLASS texture intrinsics.
-// Select the correct intrinsic based on the inputs, and make the call.
+// Comments in header
llvm::Value* Builder::createTextureCall(gla::EMdPrecision precision, llvm::Type* resultType, gla::ESamplerType samplerType, int texFlags, const TextureParameters& parameters, const char* name)
{
bool floatReturn = gla::GetBasicType(resultType)->isFloatTy();
@@ -1928,40 +1927,55 @@
return instr;
}
-// Accept all parameters needed to create LunarGLASS image intrinsics.
-// Select the correct intrinsic based on the inputs, and make the call.
-llvm::Value* Builder::createImageCall(gla::EMdPrecision precision, llvm::Type* resultType, gla::ESamplerType samplerType, int texFlags,
+// Comments in header
+llvm::Value* Builder::createImageCall(gla::EMdPrecision precision, llvm::Type* resultType, gla::ESamplerType samplerType, EImageOp imageOp,
const TextureParameters& parameters,
const char* name)
{
name = name ? name : "image";
// Max args based on LunarGLASS TopIR, no SOA
- static const int maxArgs = 6;
+ static const int maxArgs = 5;
llvm::Value* imageArgs[maxArgs] = {};
// Base case: First arguments are fixed
- int numArgs = 4;
- imageArgs[GetTextureOpIndex(ETOSamplerType)] = MakeIntConstant(context, samplerType);
- imageArgs[GetTextureOpIndex(ETOSamplerLoc)] = parameters.ETPSampler;
- imageArgs[GetTextureOpIndex(ETOFlag)] = MakeUnsignedConstant(context, *(int*)&texFlags);
- imageArgs[GetTextureOpIndex(ETOCoord)] = parameters.ETPCoords;
+ int numArgs = 3;
+ imageArgs[0] = MakeIntConstant(context, samplerType);
+ imageArgs[1] = parameters.ETPSampler;
+ imageArgs[2] = parameters.ETPCoords;
// Add the data argument if needed, and select which intrinsic to call.
llvm::Intrinsic::ID intrinsicID = llvm::Intrinsic::not_intrinsic;
- switch ((texFlags & ETFImageOp) >> ImageOpShift) {
+ switch (imageOp) {
case EImageAtomicAdd:
case EImageAtomicMin:
case EImageAtomicMax:
case EImageAtomicAnd:
case EImageAtomicOr:
case EImageAtomicXor:
- case EImageAtomicExchange:
- intrinsicID = llvm::Intrinsic::gla_imageOp;
imageArgs[numArgs] = parameters.ETPData;
++numArgs;
+ switch (imageOp) {
+ case EImageAtomicAdd: intrinsicID = llvm::Intrinsic::gla_imageAtomicAdd; break;
+ case EImageAtomicMin: intrinsicID = llvm::Intrinsic::gla_imageAtomicMin; break;
+ case EImageAtomicMax: intrinsicID = llvm::Intrinsic::gla_imageAtomicMax; break;
+ case EImageAtomicAnd: intrinsicID = llvm::Intrinsic::gla_imageAtomicAnd; break;
+ case EImageAtomicOr: intrinsicID = llvm::Intrinsic::gla_imageAtomicOr; break;
+ case EImageAtomicXor: intrinsicID = llvm::Intrinsic::gla_imageAtomicXor; break;
+ default:
+ assert(0);
+ break;
+ }
+ break;
+ case EImageAtomicExchange:
+ if (gla::GetBasicType(resultType)->isFloatTy())
+ intrinsicID = llvm::Intrinsic::gla_fImageAtomicExchange;
+ else
+ intrinsicID = llvm::Intrinsic::gla_iImageAtomicExchange;
+ imageArgs[numArgs] = parameters.ETPData;
+ ++numArgs;
break;
case EImageAtomicCompSwap:
- intrinsicID = llvm::Intrinsic::gla_imageOp;
+ intrinsicID = llvm::Intrinsic::gla_imageAtomicCompExchange;
imageArgs[numArgs] = parameters.ETPCompare;
++numArgs;
imageArgs[numArgs] = parameters.ETPData;
@@ -1990,19 +2004,27 @@
// Initialize required operands based on intrinsic
switch (intrinsicID) {
+
+ // both result and coord are varying type
case llvm::Intrinsic::gla_fImageLoad:
case llvm::Intrinsic::gla_imageLoad:
- intrinsic = getIntrinsic(intrinsicID, resultType, imageArgs[GetTextureOpIndex(ETOCoord)]->getType());
+ intrinsic = getIntrinsic(intrinsicID, resultType, imageArgs[2]->getType());
break;
+ // only the coord is varying type
case llvm::Intrinsic::gla_imageStoreF:
case llvm::Intrinsic::gla_imageStoreI:
+ case llvm::Intrinsic::gla_imageAtomicAdd:
+ case llvm::Intrinsic::gla_imageAtomicMin:
+ case llvm::Intrinsic::gla_imageAtomicMax:
+ case llvm::Intrinsic::gla_imageAtomicAnd:
+ case llvm::Intrinsic::gla_imageAtomicOr:
+ case llvm::Intrinsic::gla_imageAtomicXor:
+ case llvm::Intrinsic::gla_fImageAtomicExchange:
+ case llvm::Intrinsic::gla_iImageAtomicExchange:
+ case llvm::Intrinsic::gla_imageAtomicCompExchange:
name = 0;
- intrinsic = getIntrinsic(intrinsicID, imageArgs[GetTextureOpIndex(ETOCoord)]->getType());
- break;
-
- case llvm::Intrinsic::gla_imageOp:
- intrinsic = getIntrinsic(intrinsicID, imageArgs[GetTextureOpIndex(ETOCoord)]->getType());
+ intrinsic = getIntrinsic(intrinsicID, imageArgs[2]->getType());
break;
default:
=======================================
--- /trunk/Core/TopBuilder.h Thu Aug 20 01:05:24 2015 UTC
+++ /trunk/Core/TopBuilder.h Thu Aug 20 20:53:17 2015 UTC
@@ -377,9 +377,16 @@
llvm::Value* ETPCompare; // for image*() comparison argument
};
- // Select the correct intrinsic based on all inputs, and make the call
+ // Accept all parameters needed to create LunarGLASS texture intrinsics.
+ // (Accessing the texture, not querying; requires a coordinate.)
+ // Select the correct intrinsic based on the inputs, and make the call.
llvm::Value* createTextureCall(EMdPrecision, llvm::Type*, ESamplerType, int texFlags, const TextureParameters&, const char* name = 0);
- llvm::Value* createImageCall(EMdPrecision, llvm::Type*, ESamplerType, int texFlags, const TextureParameters&, const char* name = 0);
+
+ // Accept all parameters needed to create LunarGLASS image intrinsics (accessing the image, not querying it).
+ // (Accessing the image, not querying; requires a coordinate.)
+ // Select the correct intrinsic based on the inputs, and make the call.
+ llvm::Value* createImageCall(EMdPrecision, llvm::Type*, ESamplerType, EImageOp, const TextureParameters&, const char* name = 0);
+
llvm::Value* createTextureQueryCall(EMdPrecision, llvm::Intrinsic::ID, llvm::Type*, llvm::Constant*, llvm::Value*, llvm::Value*, const char* name = 0);
llvm::Value* createSamplePositionCall(EMdPrecision, llvm::Type*, llvm::Value*);
llvm::Value* createBitFieldExtractCall(EMdPrecision, llvm::Value*, llvm::Value*, llvm::Value*, bool isSigned);
=======================================
--- /trunk/Frontends/glslang/GlslangToTopVisitor.cpp Thu Aug 20 01:05:24 2015 UTC
+++ /trunk/Frontends/glslang/GlslangToTopVisitor.cpp Thu Aug 20 20:53:17 2015 UTC
@@ -103,7 +103,7 @@
void translateArguments(glslang::TIntermUnary&, std::vector<llvm::Value*>& arguments);
llvm::Value* handleTextureCall(glslang::TIntermOperator*);
llvm::Value* handleTexImageQuery(const glslang::TIntermOperator*, const glslang::TCrackedTextureOp&, const std::vector<llvm::Value*>& arguments, gla::ESamplerType);
- llvm::Value* handleImageAccess(const glslang::TIntermOperator*, const glslang::TCrackedTextureOp&, const std::vector<llvm::Value*>& arguments, gla::ESamplerType, int flags);
+ llvm::Value* handleImageAccess(const glslang::TIntermOperator*, const std::vector<llvm::Value*>& arguments, gla::ESamplerType);
llvm::Value* handleTextureAccess(const glslang::TIntermOperator*, const glslang::TCrackedTextureOp&, const std::vector<llvm::Value*>& arguments, gla::ESamplerType, int flags);
llvm::Value* handleUserFunctionCall(const glslang::TIntermAggregate*);
@@ -1710,9 +1710,16 @@
glslang::TCrackedTextureOp cracked;
node->crackTexture(cracked);
+ // Steer off queries
if (cracked.query || node->getOp() == glslang::EOpImageQuerySize ||
node->getOp() == glslang::EOpImageQuerySamples)
return handleTexImageQuery(node, cracked, arguments, samplerType);
+ // Steer off image accesses
+ if (sampler.image)
+ return handleImageAccess(node, arguments, samplerType);
+
+ // Handle texture accesses...
+
int texFlags = 0;
if (sampler.arrayed)
@@ -1721,10 +1728,7 @@
if (sampler.shadow)
texFlags |= gla::ETFShadow;
- if (sampler.image)
- return handleImageAccess(node, cracked, arguments, samplerType, texFlags);
- else
- return handleTextureAccess(node, cracked, arguments, samplerType, texFlags);
+ return handleTextureAccess(node, cracked, arguments, samplerType, texFlags);
}
llvm::Value* TGlslangToTopTraverser::handleTexImageQuery(const glslang::TIntermOperator* node, const glslang::TCrackedTextureOp& cracked, const std::vector<llvm::Value*>& arguments, gla::ESamplerType samplerType)
@@ -1773,7 +1777,7 @@
}
}
-llvm::Value* TGlslangToTopTraverser::handleImageAccess(const glslang::TIntermOperator* node, const glslang::TCrackedTextureOp& cracked, const std::vector<llvm::Value*>& arguments, gla::ESamplerType samplerType, int texFlags)
+llvm::Value* TGlslangToTopTraverser::handleImageAccess(const glslang::TIntermOperator* node, const std::vector<llvm::Value*>& arguments, gla::ESamplerType samplerType)
{
// set the arguments
gla::Builder::TextureParameters params = {};
@@ -1796,8 +1800,6 @@
gla::UnsupportedFunctionality("image access");
break;
}
-
- texFlags |= (imageOp << gla::ImageOpShift);
if (imageOp != gla::EImageLoad) {
if (imageOp == gla::EImageAtomicCompSwap) {
@@ -1807,7 +1809,7 @@
params.ETPData = arguments[2];
}
- return glaBuilder->createImageCall(GetMdPrecision(node->getType()), convertGlslangToGlaType(node->getType()), samplerType, texFlags, params, leftName);
+ return glaBuilder->createImageCall(GetMdPrecision(node->getType()), convertGlslangToGlaType(node->getType()), samplerType, imageOp, params, leftName);
}
llvm::Value* TGlslangToTopTraverser::handleTextureAccess(const
glslang::TIntermOperator* node, const glslang::TCrackedTextureOp& cracked,
=======================================
--- /trunk/test/baseResults/310.comp.out Mon May 25 23:01:59 2015 UTC
+++ /trunk/test/baseResults/310.comp.out Thu Aug 20 20:53:17 2015 UTC
@@ -68,38 +68,38 @@
store <4 x float> %22, <4 x float> addrspace(2)* %23
%24 = load i32 addrspace(1)* @iimg2Drgba, !gla.uniform !10
%25 = load <2 x i32>* @coord2D
- %iv1 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32 2, i32 %24, i32 65536, <2 x i32> %25), !gla.precision !40
+ %iv1 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32 2, i32 %24, <2 x i32> %25), !gla.precision !40
store <4 x i32> %iv1, <4 x i32>* %iv
%26 = load i32 addrspace(1)* @img3Drgba, !gla.uniform !13
%27 = load <3 x i32>* @coord3D
- %v3 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 3, i32 %26, i32 65536, <3 x i32> %27), !gla.precision !40
+ %v3 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 3, i32 %26, <3 x i32> %27), !gla.precision !40
store <4 x float> %v3, <4 x float>* %v
%28 = load i32 addrspace(1)* @uimgCube, !gla.uniform !16
%29 = load <3 x i32>* @coord3D
- %uv4 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32 4, i32 %28, i32 65536, <3 x i32> %29), !gla.precision !40
+ %uv4 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32 4, i32 %28, <3 x i32> %29), !gla.precision !40
store <4 x i32> %uv4, <4 x i32>* %uv
%30 = load i32 addrspace(1)* @img2DA, !gla.uniform !19
%31 = load <3 x i32>* @coord3D
- %v5 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 2, i32 %30, i32 65552, <3 x i32> %31), !gla.precision !40
+ %v5 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 2, i32 %30, <3 x i32> %31), !gla.precision !40
%32 = load <4 x float>* %v
%v6 = fadd <4 x float> %32, %v5, !gla.precision !40
store <4 x float> %v6, <4 x float>* %v
%33 = load i32 addrspace(1)* @iimg2D, !gla.uniform !22
%34 = load <2 x i32>* @coord2D
%35 = load <4 x i32>* %iv
- call void @llvm.gla.imageStoreI.v2i32(i32 2, i32 %33, i32 131072, <2 x i32> %34, <4 x i32> %35), !gla.precision !40
+ call void @llvm.gla.imageStoreI.v2i32(i32 2, i32 %33, <2 x i32> %34, <4 x i32> %35), !gla.precision !40
%36 = load i32 addrspace(1)* @uimgCube, !gla.uniform !16
%37 = load <3 x i32>* @coord3D
%38 = load <4 x i32>* %uv
- call void @llvm.gla.imageStoreI.v3i32(i32 4, i32 %36, i32 131072, <3 x i32> %37, <4 x i32> %38), !gla.precision !40
+ call void @llvm.gla.imageStoreI.v3i32(i32 4, i32 %36, <3 x i32> %37, <4 x i32> %38), !gla.precision !40
%39 = load i32 addrspace(1)* @wimg2DA, !gla.uniform !25
%40 = load <3 x i32>* @coord3D
%41 = load <4 x float>* %v
- call void @llvm.gla.imageStoreF.v3i32(i32 2, i32 %39, i32 131088, <3 x i32> %40, <4 x float> %41), !gla.precision !40
+ call void @llvm.gla.imageStoreF.v3i32(i32 2, i32 %39, <3 x i32> %40, <4 x float> %41), !gla.precision !40
%42 = load i32 addrspace(1)* @wimg2D, !gla.uniform !28
%43 = load <2 x i32>* @coord2D
%44 = load <4 x float>* %v
- call void @llvm.gla.imageStoreF.v2i32(i32 2, i32 %42, i32 131072, <2 x i32> %43, <4 x float> %44), !gla.precision !40
+ call void @llvm.gla.imageStoreF.v2i32(i32 2, i32 %42, <2 x i32> %43, <4 x float> %44), !gla.precision !40
br label %stage-epilogue
stage-epilogue: ; preds = %mainBody
@@ -113,25 +113,25 @@
declare void @llvm.gla.barrier() #0
; Function Attrs: nounwind readonly
-declare <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32, i32, i32, <2 x i32>) #1
+declare <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32, i32, <2 x i32>) #1
; Function Attrs: nounwind readonly
-declare <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32, i32, i32, <3 x i32>) #1
+declare <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32, i32, <3 x i32>) #1
; Function Attrs: nounwind readonly
-declare <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32, i32, i32, <3 x i32>) #1
+declare <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32, i32, <3 x i32>) #1
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreI.v2i32(i32, i32, i32, <2 x i32>, <4 x i32>) #0
+declare void @llvm.gla.imageStoreI.v2i32(i32, i32, <2 x i32>, <4 x i32>) #0
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreI.v3i32(i32, i32, i32, <3 x i32>, <4 x i32>) #0
+declare void @llvm.gla.imageStoreI.v3i32(i32, i32, <3 x i32>, <4 x i32>) #0
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreF.v3i32(i32, i32, i32, <3 x i32>, <4 x float>) #0
+declare void @llvm.gla.imageStoreF.v3i32(i32, i32, <3 x i32>, <4 x float>) #0
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreF.v2i32(i32, i32, i32, <2 x i32>, <4 x float>) #0
+declare void @llvm.gla.imageStoreF.v2i32(i32, i32, <2 x i32>, <4 x float>) #0
attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }
@@ -233,22 +233,22 @@
store <4 x float> %3, <4 x float> addrspace(2)* %9, align 16
%10 = load i32 addrspace(1)* @iimg2Drgba, align 4, !gla.uniform !10
%11 = load <2 x i32>* @coord2D, align 8
- %iv1 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32 2, i32 %10, i32 65536, <2 x i32> %11), !gla.precision !40
+ %iv1 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32 2, i32 %10, <2 x i32> %11), !gla.precision !40
%12 = load i32 addrspace(1)* @img3Drgba, align 4, !gla.uniform !13
%13 = load <3 x i32>* @coord3D, align 16
- %v3 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 3, i32 %12, i32 65536, <3 x i32> %13), !gla.precision !40
+ %v3 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 3, i32 %12, <3 x i32> %13), !gla.precision !40
%14 = load i32 addrspace(1)* @uimgCube, align 4, !gla.uniform !16
- %uv4 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32 4, i32 %14, i32 65536, <3 x i32> %13), !gla.precision !40
+ %uv4 = call <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32 4, i32 %14, <3 x i32> %13), !gla.precision !40
%15 = load i32 addrspace(1)* @img2DA, align 4, !gla.uniform !19
- %v5 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 2, i32 %15, i32 65552, <3 x i32> %13), !gla.precision !40
+ %v5 = call <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32 2, i32 %15, <3 x i32> %13), !gla.precision !40
%v6 = fadd <4 x float> %v3, %v5, !gla.precision !40
%16 = load i32 addrspace(1)* @iimg2D, align 4, !gla.uniform !22
- call void @llvm.gla.imageStoreI.v2i32(i32 2, i32 %16, i32 131072, <2 x i32> %11, <4 x i32> %iv1), !gla.precision !40
- call void @llvm.gla.imageStoreI.v3i32(i32 4, i32 %14, i32 131072, <3 x i32> %13, <4 x i32> %uv4), !gla.precision !40
+ call void @llvm.gla.imageStoreI.v2i32(i32 2, i32 %16, <2 x i32> %11, <4 x i32> %iv1), !gla.precision !40
+ call void @llvm.gla.imageStoreI.v3i32(i32 4, i32 %14, <3 x i32> %13, <4 x i32> %uv4), !gla.precision !40
%17 = load i32 addrspace(1)* @wimg2DA, align 4, !gla.uniform !25
- call void @llvm.gla.imageStoreF.v3i32(i32 2, i32 %17, i32 131088, <3 x i32> %13, <4 x float> %v6), !gla.precision !40
+ call void @llvm.gla.imageStoreF.v3i32(i32 2, i32 %17, <3 x i32> %13, <4 x float> %v6), !gla.precision !40
%18 = load i32 addrspace(1)* @wimg2D, align 4, !gla.uniform !28
- call void @llvm.gla.imageStoreF.v2i32(i32 2, i32 %18, i32 131072, <2 x i32> %11, <4 x float> %v6), !gla.precision !40
+ call void @llvm.gla.imageStoreF.v2i32(i32 2, i32 %18, <2 x i32> %11, <4 x float> %v6), !gla.precision !40
br label %stage-epilogue
stage-epilogue: ; preds = %entry
@@ -262,25 +262,25 @@
declare void @llvm.gla.barrier() #0
; Function Attrs: nounwind readonly
-declare <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32, i32, i32, <2 x i32>) #1
+declare <4 x i32> @llvm.gla.imageLoad.v4i32.v2i32(i32, i32, <2 x i32>) #1
; Function Attrs: nounwind readonly
-declare <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32, i32, i32, <3 x i32>) #1
+declare <4 x float> @llvm.gla.fImageLoad.v4f32.v3i32(i32, i32, <3 x i32>) #1
; Function Attrs: nounwind readonly
-declare <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32, i32, i32, <3 x i32>) #1
+declare <4 x i32> @llvm.gla.imageLoad.v4i32.v3i32(i32, i32, <3 x i32>) #1
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreI.v2i32(i32, i32, i32, <2 x i32>, <4 x i32>) #0
+declare void @llvm.gla.imageStoreI.v2i32(i32, i32, <2 x i32>, <4 x i32>) #0
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreI.v3i32(i32, i32, i32, <3 x i32>, <4 x i32>) #0
+declare void @llvm.gla.imageStoreI.v3i32(i32, i32, <3 x i32>, <4 x i32>) #0
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreF.v3i32(i32, i32, i32, <3 x i32>, <4 x float>) #0
+declare void @llvm.gla.imageStoreF.v3i32(i32, i32, <3 x i32>, <4 x float>) #0
; Function Attrs: nounwind
-declare void @llvm.gla.imageStoreF.v2i32(i32, i32, i32, <2 x i32>, <4 x float>) #0
+declare void @llvm.gla.imageStoreF.v2i32(i32, i32, <2 x i32>, <4 x float>) #0
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.gla.fSwizzle.v4f32.f32.v4i32(float, <4 x i32>) #2
=======================================
--- /trunk/test/baseResults/aep.vert.out Thu Aug 20 01:05:24 2015 UTC
+++ /trunk/test/baseResults/aep.vert.out Thu Aug 20 20:53:17 2015 UTC
@@ -447,71 +447,71 @@
%1 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
%2 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
%3 = extractelement <2 x i32> %2, i32 0, !gla.precision !85
- %image = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %0, i32 196608, <2
x i32> %1, i32 %3), !gla.precision !85
- %4 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
- %5 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %6 = load i32* %datu
- %image1 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %4, i32 196608, <2
x i32> %5, i32 %6), !gla.precision !85
- %7 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
- %8 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %9 = load i32* %dati
- %image2 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %7, i32 262144, <2
x i32> %8, i32 %9), !gla.precision !85
- %10 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
- %11 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %12 = load i32* %datu
- %image3 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %10, i32 262144,
<2 x i32> %11, i32 %12), !gla.precision !85
- %13 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
+ %4 = call i32 @llvm.gla.imageAtomicAdd.v2i32(i32 2, i32 %0, <2 x
i32> %1, i32 %3), !gla.precision !85
+ %5 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
+ %6 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %7 = load i32* %datu
+ %8 = call i32 @llvm.gla.imageAtomicAdd.v2i32(i32 2, i32 %5, <2 x
i32> %6, i32 %7), !gla.precision !85
+ %9 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
+ %10 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %11 = load i32* %dati
+ %12 = call i32 @llvm.gla.imageAtomicMin.v2i32(i32 2, i32 %9, <2 x
i32> %10, i32 %11), !gla.precision !85
+ %13 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
%14 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %15 = load i32* %dati
- %image4 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %13, i32 327680,
<2 x i32> %14, i32 %15), !gla.precision !85
- %16 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
- %17 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %18 = load i32* %datu
- %image5 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %16, i32 327680,
<2 x i32> %17, i32 %18), !gla.precision !85
- %19 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
- %20 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %21 = load i32* %dati
- %image6 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %19, i32 393216,
<2 x i32> %20, i32 %21), !gla.precision !85
- %22 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
- %23 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %24 = load i32* %datu
- %image7 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %22, i32 393216,
<2 x i32> %23, i32 %24), !gla.precision !85
+ %15 = load i32* %datu
+ %16 = call i32 @llvm.gla.imageAtomicMin.v2i32(i32 2, i32 %13, <2 x
i32> %14, i32 %15), !gla.precision !85
+ %17 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
+ %18 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %19 = load i32* %dati
+ %20 = call i32 @llvm.gla.imageAtomicMax.v2i32(i32 2, i32 %17, <2 x
i32> %18, i32 %19), !gla.precision !85
+ %21 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
+ %22 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %23 = load i32* %datu
+ %24 = call i32 @llvm.gla.imageAtomicMax.v2i32(i32 2, i32 %21, <2 x
i32> %22, i32 %23), !gla.precision !85
%25 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
%26 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
%27 = load i32* %dati
- %image8 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %25, i32 458752,
<2 x i32> %26, i32 %27), !gla.precision !85
- %28 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
- %29 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %30 = load i32* %datu
- %image9 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %28, i32 458752,
<2 x i32> %29, i32 %30), !gla.precision !85
- %31 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
- %32 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %33 = load i32* %dati
- %image10 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %31, i32 524288,
<2 x i32> %32, i32 %33), !gla.precision !85
- %34 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
- %35 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %36 = load i32* %datu
- %image11 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %34, i32 524288,
<2 x i32> %35, i32 %36), !gla.precision !85
- %37 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
+ %28 = call i32 @llvm.gla.imageAtomicAnd.v2i32(i32 2, i32 %25, <2 x
i32> %26, i32 %27), !gla.precision !85
+ %29 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
+ %30 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %31 = load i32* %datu
+ %32 = call i32 @llvm.gla.imageAtomicAnd.v2i32(i32 2, i32 %29, <2 x
i32> %30, i32 %31), !gla.precision !85
+ %33 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
+ %34 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %35 = load i32* %dati
+ %36 = call i32 @llvm.gla.imageAtomicOr.v2i32(i32 2, i32 %33, <2 x
i32> %34, i32 %35), !gla.precision !85
+ %37 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
%38 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %39 = load i32* %dati
- %image12 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %37, i32 589824,
<2 x i32> %38, i32 %39), !gla.precision !85
- %40 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
- %41 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %42 = load i32* %datu
- %image13 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %40, i32 589824,
<2 x i32> %41, i32 %42), !gla.precision !85
- %43 = load i32 addrspace(1)* @im2Df, !gla.uniform !78
- %44 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %45 = load float* %datf
- %image14 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %43, i32 589824,
<2 x i32> %44, float %45), !gla.precision !85
- %46 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
- %47 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %48 = load i32* %dati
- %image15 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %46, i32 655360,
<2 x i32> %47, i32 3, i32 %48), !gla.precision !85
- %49 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
+ %39 = load i32* %datu
+ %40 = call i32 @llvm.gla.imageAtomicOr.v2i32(i32 2, i32 %37, <2 x
i32> %38, i32 %39), !gla.precision !85
+ %41 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
+ %42 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %43 = load i32* %dati
+ %44 = call i32 @llvm.gla.imageAtomicXor.v2i32(i32 2, i32 %41, <2 x
i32> %42, i32 %43), !gla.precision !85
+ %45 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
+ %46 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %47 = load i32* %datu
+ %48 = call i32 @llvm.gla.imageAtomicXor.v2i32(i32 2, i32 %45, <2 x
i32> %46, i32 %47), !gla.precision !85
+ %49 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
%50 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
- %51 = load i32* %datu
- %image16 = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %49, i32 655360,
<2 x i32> %50, i32 5, i32 %51), !gla.precision !85
+ %51 = load i32* %dati
+ %52 = call i32 @llvm.gla.iImageAtomicExchange.v2i32(i32 2, i32 %49, <2 x
i32> %50, i32 %51), !gla.precision !85
+ %53 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
+ %54 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %55 = load i32* %datu
+ %56 = call i32 @llvm.gla.iImageAtomicExchange.v2i32(i32 2, i32 %53, <2 x
i32> %54, i32 %55), !gla.precision !85
+ %57 = load i32 addrspace(1)* @im2Df, !gla.uniform !78
+ %58 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %59 = load float* %datf
+ %60 = call float @llvm.gla.fImageAtomicExchange.v2i32(i32 2, i32 %57, <2
x i32> %58, float %59), !gla.precision !85
+ %61 = load i32 addrspace(1)* @im2Di, !gla.uniform !71
+ %62 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %63 = load i32* %dati
+ %64 = call i32 @llvm.gla.imageAtomicCompExchange.v2i32(i32 2, i32 %61,
<2 x i32> %62, i32 3, i32 %63), !gla.precision !85
+ %65 = load i32 addrspace(1)* @im2Du, !gla.uniform !75
+ %66 = load <2 x i32> addrspace(2)* @P, !gla.uniform !74
+ %67 = load i32* %datu
+ %68 = call i32 @llvm.gla.imageAtomicCompExchange.v2i32(i32 2, i32 %65,
<2 x i32> %66, i32 5, i32 %67), !gla.precision !85
ret void
}
@@ -528,7 +528,7 @@
declare i32 @llvm.gla.queryTextureSizeNoLod.i32(i32, i32) #1
; Function Attrs: nounwind readnone
-declare i32 @llvm.gla.queryImageSize.i32(i32, i32, i32) #1
+declare i32 @llvm.gla.queryImageSize.i32(i32, i32) #1
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.gla.fTexelFetchOffset.v4f32.i32.i32.i32(i32, i32, i32, i32, i32, float, i32) #1
@@ -540,7 +540,7 @@
declare <3 x i32> @llvm.gla.queryTextureSize.v3i32(i32, i32, i32) #1
; Function Attrs: nounwind readnone
-declare <3 x i32> @llvm.gla.queryImageSize.v3i32(i32, i32, i32) #1
+declare <3 x i32> @llvm.gla.queryImageSize.v3i32(i32, i32) #1
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.gla.fTextureSample.v4f32.v4f32(i32, i32, i32, <4 x float>) #1
@@ -579,7 +579,31 @@
declare <4 x i32> @llvm.gla.texelFetchOffset.v4i32.v3i32.i32.i32(i32, i32, i32, <3 x i32>, i32, float, i32) #1
; Function Attrs: nounwind
-declare i32 @llvm.gla.imageOp.v2i32(i32, i32, i32, <2 x i32>, i32) #2
+declare i32 @llvm.gla.imageAtomicAdd.v2i32(i32, i32, <2 x i32>, i32) #2
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicMin.v2i32(i32, i32, <2 x i32>, i32) #2
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicMax.v2i32(i32, i32, <2 x i32>, i32) #2
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicAnd.v2i32(i32, i32, <2 x i32>, i32) #2
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicOr.v2i32(i32, i32, <2 x i32>, i32) #2
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicXor.v2i32(i32, i32, <2 x i32>, i32) #2
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.iImageAtomicExchange.v2i32(i32, i32, <2 x i32>, i32) #2
+
+; Function Attrs: nounwind
+declare float @llvm.gla.fImageAtomicExchange.v2i32(i32, i32, <2 x i32>, float) #2
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicCompExchange.v2i32(i32, i32, <2 x i32>, i32, i32) #2
attributes #0 = { alwaysinline }
attributes #1 = { nounwind readnone }
@@ -866,25 +890,25 @@
%59 = load i32 addrspace(1)* @im2Di, align 4, !gla.uniform !71
%60 = load <2 x i32> addrspace(2)* @P, align 8, !gla.uniform !74
%61 = extractelement <2 x i32> %60, i32 0, !gla.precision !85
- %image.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32 196608,
<2 x i32> %60, i32 %61), !gla.precision !85
- %62 = load i32 addrspace(1)* @im2Du, align 4, !gla.uniform !75
- %image1.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32 196608,
<2 x i32> %60, i32 7), !gla.precision !85
- %image2.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32 262144,
<2 x i32> %60, i32 4), !gla.precision !85
- %image3.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32 262144,
<2 x i32> %60, i32 7), !gla.precision !85
- %image4.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32 327680,
<2 x i32> %60, i32 4), !gla.precision !85
- %image5.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32 327680,
<2 x i32> %60, i32 7), !gla.precision !85
- %image6.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32 393216,
<2 x i32> %60, i32 4), !gla.precision !85
- %image7.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32 393216,
<2 x i32> %60, i32 7), !gla.precision !85
- %image8.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32 458752,
<2 x i32> %60, i32 4), !gla.precision !85
- %image9.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32 458752,
<2 x i32> %60, i32 7), !gla.precision !85
- %image10.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32
524288, <2 x i32> %60, i32 4), !gla.precision !85
- %image11.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32
524288, <2 x i32> %60, i32 7), !gla.precision !85
- %image12.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32
589824, <2 x i32> %60, i32 4), !gla.precision !85
- %image13.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32
589824, <2 x i32> %60, i32 7), !gla.precision !85
- %63 = load i32 addrspace(1)* @im2Df, align 4, !gla.uniform !78
- %image14.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %63, i32
589824, <2 x i32> %60, float 0x3FFCCCCCC0000000), !gla.precision !85
- %image15.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %59, i32
655360, <2 x i32> %60, i32 3, i32 4), !gla.precision !85
- %image16.i = call i32 @llvm.gla.imageOp.v2i32(i32 2, i32 %62, i32
655360, <2 x i32> %60, i32 5, i32 7), !gla.precision !85
+ %62 = call i32 @llvm.gla.imageAtomicAdd.v2i32(i32 2, i32 %59, <2 x
i32> %60, i32 %61), !gla.precision !85
+ %63 = load i32 addrspace(1)* @im2Du, align 4, !gla.uniform !75
+ %64 = call i32 @llvm.gla.imageAtomicAdd.v2i32(i32 2, i32 %63, <2 x
i32> %60, i32 7), !gla.precision !85
+ %65 = call i32 @llvm.gla.imageAtomicMin.v2i32(i32 2, i32 %59, <2 x
i32> %60, i32 4), !gla.precision !85
+ %66 = call i32 @llvm.gla.imageAtomicMin.v2i32(i32 2, i32 %63, <2 x
i32> %60, i32 7), !gla.precision !85
+ %67 = call i32 @llvm.gla.imageAtomicMax.v2i32(i32 2, i32 %59, <2 x
i32> %60, i32 4), !gla.precision !85
+ %68 = call i32 @llvm.gla.imageAtomicMax.v2i32(i32 2, i32 %63, <2 x
i32> %60, i32 7), !gla.precision !85
+ %69 = call i32 @llvm.gla.imageAtomicAnd.v2i32(i32 2, i32 %59, <2 x
i32> %60, i32 4), !gla.precision !85
+ %70 = call i32 @llvm.gla.imageAtomicAnd.v2i32(i32 2, i32 %63, <2 x
i32> %60, i32 7), !gla.precision !85
+ %71 = call i32 @llvm.gla.imageAtomicOr.v2i32(i32 2, i32 %59, <2 x
i32> %60, i32 4), !gla.precision !85
+ %72 = call i32 @llvm.gla.imageAtomicOr.v2i32(i32 2, i32 %63, <2 x
i32> %60, i32 7), !gla.precision !85
+ %73 = call i32 @llvm.gla.imageAtomicXor.v2i32(i32 2, i32 %59, <2 x
i32> %60, i32 4), !gla.precision !85
+ %74 = call i32 @llvm.gla.imageAtomicXor.v2i32(i32 2, i32 %63, <2 x
i32> %60, i32 7), !gla.precision !85
+ %75 = call i32 @llvm.gla.iImageAtomicExchange.v2i32(i32 2, i32 %59, <2 x
i32> %60, i32 4), !gla.precision !85
+ %76 = call i32 @llvm.gla.iImageAtomicExchange.v2i32(i32 2, i32 %63, <2 x
i32> %60, i32 7), !gla.precision !85
+ %77 = load i32 addrspace(1)* @im2Df, align 4, !gla.uniform !78
+ %78 = call float @llvm.gla.fImageAtomicExchange.v2i32(i32 2, i32 %77, <2
x i32> %60, float 0x3FFCCCCCC0000000), !gla.precision !85
+ %79 = call i32 @llvm.gla.imageAtomicCompExchange.v2i32(i32 2, i32 %59,
<2 x i32> %60, i32 3, i32 4), !gla.precision !85
+ %80 = call i32 @llvm.gla.imageAtomicCompExchange.v2i32(i32 2, i32 %63,
<2 x i32> %60, i32 5, i32 7), !gla.precision !85
br label %stage-epilogue
stage-epilogue: ; preds = %entry
@@ -907,7 +931,7 @@
declare i32 @llvm.gla.queryTextureSizeNoLod.i32(i32, i32) #0
; Function Attrs: nounwind readnone
-declare i32 @llvm.gla.queryImageSize.i32(i32, i32, i32) #0
+declare i32 @llvm.gla.queryImageSize.i32(i32, i32) #0
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.gla.fTexelFetchOffset.v4f32.i32.i32.i32(i32, i32, i32, i32, i32, float, i32) #0
@@ -919,7 +943,7 @@
declare <3 x i32> @llvm.gla.queryTextureSize.v3i32(i32, i32, i32) #0
; Function Attrs: nounwind readnone
-declare <3 x i32> @llvm.gla.queryImageSize.v3i32(i32, i32, i32) #0
+declare <3 x i32> @llvm.gla.queryImageSize.v3i32(i32, i32) #0
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.gla.fTextureSample.v4f32.v4f32(i32, i32, i32, <4 x float>) #0
@@ -958,7 +982,31 @@
declare <4 x i32> @llvm.gla.texelFetchOffset.v4i32.v3i32.i32.i32(i32, i32, i32, <3 x i32>, i32, float, i32) #0
; Function Attrs: nounwind
-declare i32 @llvm.gla.imageOp.v2i32(i32, i32, i32, <2 x i32>, i32) #1
+declare i32 @llvm.gla.imageAtomicAdd.v2i32(i32, i32, <2 x i32>, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicMin.v2i32(i32, i32, <2 x i32>, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicMax.v2i32(i32, i32, <2 x i32>, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicAnd.v2i32(i32, i32, <2 x i32>, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicOr.v2i32(i32, i32, <2 x i32>, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicXor.v2i32(i32, i32, <2 x i32>, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.iImageAtomicExchange.v2i32(i32, i32, <2 x i32>, i32) #1
+
+; Function Attrs: nounwind
+declare float @llvm.gla.fImageAtomicExchange.v2i32(i32, i32, <2 x i32>, float) #1
+
+; Function Attrs: nounwind
+declare i32 @llvm.gla.imageAtomicCompExchange.v2i32(i32, i32, <2 x i32>, i32, i32) #1
; Function Attrs: nounwind readnone
declare <4 x float> @llvm.gla.fMultiInsert.v4f32.v4f32.v3f32.v3f32.v3f32.f32(<4 x float>, i32, <3 x float>, i32, <3 x float>, i32, <3 x float>, i32, float, i32) #0
@@ -1248,23 +1296,23 @@
highp vec4 H_e134ze = H_7ykb61 * H_wecuat;
highp vec4 H_w4mpj51 = H_e134ze + H_vk0dfn1;
outInst.color = H_w4mpj51;
- highp int image = imageAtomicAdd(im2Di, P, P.x);
- highp int image1 = int(imageAtomicAdd(im2Du, P, C_7));
- highp int image2 = imageAtomicMin(im2Di, P, C_4);
- highp int image3 = int(imageAtomicMin(im2Du, P, C_7));
- highp int image4 = imageAtomicMax(im2Di, P, C_4);
- highp int image5 = int(imageAtomicMax(im2Du, P, C_7));
- highp int image6 = imageAtomicAnd(im2Di, P, C_4);
- highp int image7 = int(imageAtomicAnd(im2Du, P, C_7));
- highp int image8 = imageAtomicOr(im2Di, P, C_4);
- highp int image9 = int(imageAtomicOr(im2Du, P, C_7));
- highp int imagea = imageAtomicXor(im2Di, P, C_4);
- highp int imageb = int(imageAtomicXor(im2Du, P, C_7));
- highp int imagec = imageAtomicExchange(im2Di, P, C_4);
- highp int imaged = int(imageAtomicExchange(im2Du, P, C_7));
- highp int imagee = imageAtomicExchange(im2Df, P, C_1d8);
- highp int imagef = imageAtomicCompSwap(im2Di, P, C_3, C_4);
- highp int imageg = int(imageAtomicCompSwap(im2Du, P, C_5, C_7));
+ highp int H_mjkl0z = imageAtomicAdd(im2Di, P, P.x);
+ highp int H_nbx7j2 = int(imageAtomicAdd(im2Du, P, C_7));
+ highp int H_jibexl = imageAtomicMin(im2Di, P, C_4);
+ highp int H_g61n5q = int(imageAtomicMin(im2Du, P, C_7));
+ highp int H_hq1jrg1 = imageAtomicMax(im2Di, P, C_4);
+ highp int H_6kaa4s = int(imageAtomicMax(im2Du, P, C_7));
+ highp int H_4y5svb1 = imageAtomicAnd(im2Di, P, C_4);
+ highp int H_1jbwq31 = int(imageAtomicAnd(im2Du, P, C_7));
+ highp int H_u14qg = imageAtomicOr(im2Di, P, C_4);
+ highp int H_xozrec1 = int(imageAtomicOr(im2Du, P, C_7));
+ highp int H_oo8y65 = imageAtomicXor(im2Di, P, C_4);
+ highp int H_99e9xw1 = int(imageAtomicXor(im2Du, P, C_7));
+ highp int H_i2kk971 = imageAtomicExchange(im2Di, P, C_4);
+ highp int H_lyyarb = int(imageAtomicExchange(im2Du, P, C_7));
+ highp float H_ke2fhb = imageAtomicExchange(im2Df, P, C_1d8);
+ highp int H_qc97iy1 = imageAtomicCompSwap(im2Di, P, C_3, C_4);
+ highp int H_9n52yt = int(imageAtomicCompSwap(im2Du, P, C_5, C_7));
}
@@ -1293,8 +1341,7 @@
ERROR: 0:197: 'imageAtomicOr' : no matching overloaded function found
ERROR: 0:199: 'imageAtomicXor' : no matching overloaded function found
ERROR: 0:201: 'imageAtomicExchange' : no matching overloaded function found
-ERROR: 0:202: '=' : cannot convert from 'global highp float' to 'temp highp int'
ERROR: 0:204: 'imageAtomicCompSwap' : no matching overloaded function found
-ERROR: 22 compilation errors. No code generated.
+ERROR: 21 compilation errors. No code generated.