(glslang) Further removal of code we don't need

LibretroAdmin 2022-07-29 14:43:22 +02:00
parent 22bef5dc6b
commit 11340c3c9c
9 changed files with 80 additions and 252 deletions

View File

@@ -390,19 +390,6 @@ __inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
}
}
__inline bool isTypeSignedInt(TBasicType type)
{
switch (type) {
case EbtInt8:
case EbtInt16:
case EbtInt:
case EbtInt64:
return true;
default:
return false;
}
}
__inline bool isTypeUnsignedInt(TBasicType type)
{
switch (type) {
@@ -416,11 +403,6 @@ __inline bool isTypeUnsignedInt(TBasicType type)
}
}
__inline bool isTypeInt(TBasicType type)
{
return isTypeSignedInt(type) || isTypeUnsignedInt(type);
}
__inline bool isTypeFloat(TBasicType type)
{
switch (type) {
@@ -433,32 +415,6 @@ __inline bool isTypeFloat(TBasicType type)
}
}
__inline int getTypeRank(TBasicType type) {
int res = -1;
switch(type) {
case EbtInt8:
case EbtUint8:
res = 0;
break;
case EbtInt16:
case EbtUint16:
res = 1;
break;
case EbtInt:
case EbtUint:
res = 2;
break;
case EbtInt64:
case EbtUint64:
res = 3;
break;
default:
assert(false);
break;
}
return res;
}
} // end namespace glslang
#endif // _BASICTYPES_INCLUDED_
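
The hunks above delete the isTypeSignedInt, isTypeInt, and getTypeRank helpers from the shared basic-types header (the one guarded by _BASICTYPES_INCLUDED_); later hunks in this commit re-add them as static __inline copies in the translation units that still call them. A minimal sketch of that pattern, using a hypothetical trimmed-down enum rather than the real TBasicType:

#include <cassert>

// Hypothetical, trimmed stand-in for glslang's TBasicType; the real enum
// has many more members.
enum TBasicType { EbtInt8, EbtInt16, EbtInt, EbtInt64, EbtUint, EbtFloat };

// Internal-linkage copy of the helper, the way later hunks in this commit
// re-add it to the .cpp files that still need it.
static inline bool isTypeSignedInt(TBasicType type)
{
    switch (type) {
    case EbtInt8:
    case EbtInt16:
    case EbtInt:
    case EbtInt64:
        return true;
    default:
        return false;
    }
}

int main()
{
    assert(isTypeSignedInt(EbtInt));
    assert(!isTypeSignedInt(EbtUint) && !isTypeSignedInt(EbtFloat));
    return 0;
}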

View File

@@ -37,10 +37,6 @@
#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_
#ifdef _DEBUG
# define GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
@@ -81,37 +77,23 @@ public:
// This would be cleaner with if (guardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
# ifdef GUARD_BLOCKS
memset(preGuard(), guardBlockBeginVal, guardBlockSize);
memset(data(), userDataFill, size);
memset(postGuard(), guardBlockEndVal, guardBlockSize);
# endif
}
void check() const {
checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
checkGuardBlock(postGuard(), guardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
inline static size_t allocationSize(size_t size) {
return size + 2 * guardBlockSize + headerSize();
return size + 2 * 0 + headerSize();
}
// Offset from surrounding buffer to get to user data buffer.
inline static unsigned char* offsetAllocation(unsigned char* m) {
return m + guardBlockSize + headerSize();
return m + headerSize();
}
private:
void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char* preGuard() const { return mem + headerSize(); }
unsigned char* data() const { return preGuard() + guardBlockSize; }
unsigned char* data() const { return preGuard(); }
unsigned char* postGuard() const { return data() + size; }
size_t size; // size of the user data area
@@ -122,12 +104,7 @@ private:
const static unsigned char guardBlockEndVal;
const static unsigned char userDataFill;
const static size_t guardBlockSize;
# ifdef GUARD_BLOCKS
inline static size_t headerSize() { return sizeof(TAllocation); }
# else
inline static size_t headerSize() { return 0; }
# endif
};
//
@@ -165,11 +142,6 @@ public:
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory.
@@ -188,21 +160,11 @@ protected:
struct tHeader {
tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
lastAllocation(0),
#endif
nextPage(nextPage), pageCount(pageCount) { }
~tHeader() {
#ifdef GUARD_BLOCKS
if (lastAllocation)
lastAllocation->checkAllocList();
#endif
}
#ifdef GUARD_BLOCKS
TAllocation* lastAllocation;
#endif
tHeader* nextPage;
size_t pageCount;
};
@@ -214,15 +176,7 @@ protected:
typedef std::vector<tAllocState> tAllocStack;
// Track allocations if and only if we're using guard blocks
#ifndef GUARD_BLOCKS
void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
new(memory) TAllocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
// This is optimized entirely away if GUARD_BLOCKS is not defined.
return TAllocation::offsetAllocation(memory);
}
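
With the GUARD_BLOCKS machinery stripped out of this header (guarded by _POOLALLOC_INCLUDED_), guardBlockSize and headerSize() are effectively zero, so allocationSize() and offsetAllocation() collapse to identity arithmetic. A small standalone sketch of that simplification, with hypothetical free functions standing in for the real TAllocation members:

#include <cassert>
#include <cstddef>

// Hypothetical stand-ins for the constants this commit zeroes out.
static const size_t guardBlockSize = 0;
static size_t headerSize() { return 0; }

// Total bytes needed to satisfy a user request of 'size' bytes.
static size_t allocationSize(size_t size)
{
    return size + 2 * guardBlockSize + headerSize(); // now just 'size'
}

// Offset from the raw block to the user-visible data pointer.
static unsigned char* offsetAllocation(unsigned char* m)
{
    return m + guardBlockSize + headerSize(); // now just 'm'
}

int main()
{
    unsigned char block[32];
    assert(allocationSize(sizeof(block)) == sizeof(block));
    assert(offsetAllocation(block) == block);
    return 0;
}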

View File

@@ -46,109 +46,25 @@
#include "../MachineIndependent/Versions.h"
#include "InfoSink.h"
class TCompiler;
class TLinker;
class TUniformMap;
//
// The base class used to back handles returned to the driver.
//
class TShHandleBase {
public:
TShHandleBase() { pool = new glslang::TPoolAllocator; }
virtual ~TShHandleBase() { delete pool; }
virtual TCompiler* getAsCompiler() { return 0; }
virtual TLinker* getAsLinker() { return 0; }
virtual TUniformMap* getAsUniformMap() { return 0; }
virtual glslang::TPoolAllocator* getPool() const { return pool; }
private:
glslang::TPoolAllocator* pool;
};
//
// The base class for the machine dependent linker to derive from
// for managing where uniforms live.
//
class TUniformMap : public TShHandleBase {
public:
TUniformMap() { }
virtual ~TUniformMap() { }
virtual TUniformMap* getAsUniformMap() { return this; }
virtual int getLocation(const char* name) = 0;
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink infoSink;
};
class TIntermNode;
//
// The base class for the machine dependent compiler to derive from
// for managing object code from the compile.
//
class TCompiler : public TShHandleBase {
class TCompiler {
public:
TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { }
TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l) { }
virtual ~TCompiler() { }
EShLanguage getLanguage() { return language; }
virtual TInfoSink& getInfoSink() { return infoSink; }
virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
virtual TCompiler* getAsCompiler() { return this; }
virtual bool linkable() { return haveValidObjectCode; }
TInfoSink& infoSink;
protected:
TCompiler& operator=(TCompiler&);
EShLanguage language;
bool haveValidObjectCode;
};
//
// Link operations are based on a list of compile results...
//
typedef glslang::TVector<TCompiler*> TCompilerList;
typedef glslang::TVector<TShHandleBase*> THandleList;
//
// The base class for the machine dependent linker to derive from
// to manage the resulting executable.
//
class TLinker : public TShHandleBase {
public:
TLinker(EShExecutable e, TInfoSink& iSink) :
infoSink(iSink),
executable(e),
haveReturnableObjectCode(false),
appAttributeBindings(0),
fixedAttributeBindings(0),
excludedAttributes(0),
excludedCount(0),
uniformBindings(0) { }
virtual TLinker* getAsLinker() { return this; }
virtual ~TLinker() { }
virtual bool link(TCompilerList&, TUniformMap*) = 0;
virtual bool link(THandleList&) { return false; }
virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink& infoSink;
protected:
TLinker& operator=(TLinker&);
EShExecutable executable;
bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
const ShBindingTable* appAttributeBindings;
const ShBindingTable* fixedAttributeBindings;
const int* excludedAttributes;
int excludedCount;
ShBindingTable* uniformBindings; // created by the linker
};
#endif // _SHHANDLE_INCLUDED_
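
After this hunk the TShHandleBase/TLinker/TUniformMap layer is gone and TCompiler stands alone, keeping only the language tag, a reference to the caller's TInfoSink, and the pure virtual compile() entry point. A rough consolidated view of the surviving class as these hunks show it, with hypothetical stubs for the glslang types it mentions:

// Hypothetical stubs for the glslang types the sketch refers to.
enum EShLanguage { EShLangVertex, EShLangFragment };
enum EProfile { ENoProfile };
class TInfoSink { };
class TIntermNode;

// Roughly the shape TCompiler takes after this commit: no TShHandleBase base
// class, no linker or uniform-map plumbing, no haveValidObjectCode flag.
class TCompiler {
public:
    TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink), language(l) { }
    virtual ~TCompiler() { }
    EShLanguage getLanguage() { return language; }
    virtual TInfoSink& getInfoSink() { return infoSink; }
    virtual bool compile(TIntermNode* root, int version = 0,
                         EProfile profile = ENoProfile) = 0;
    TInfoSink& infoSink;
protected:
    EShLanguage language;
};

// Tiny hypothetical subclass, only to show the reduced surface in use.
class TNullCompiler : public TCompiler {
public:
    TNullCompiler(EShLanguage l, TInfoSink& sink) : TCompiler(l, sink) { }
    bool compile(TIntermNode*, int, EProfile) override { return true; }
};

int main()
{
    TInfoSink sink;
    TNullCompiler c(EShLangFragment, sink);
    return c.compile(nullptr, 0, ENoProfile) ? 0 : 1;
}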

View File

@@ -1,13 +0,0 @@
// The file revision.h should be updated to the latest version, somehow, on
// check-in, if glslang has changed.
//
// revision.template is the source for revision.h when using SubWCRev as the
// method of updating revision.h. You don't have to do it this way, the
// requirement is only that revision.h gets updated.
//
// revision.h is under source control so that not all consumers of glslang
// source have to figure out how to create revision.h just to get a build
// going. However, if it is not updated, it can be a version behind.
#define GLSLANG_REVISION "$WCREV$"
#define GLSLANG_DATE "$WCDATE$"

View File

@@ -52,7 +52,7 @@ typedef union {
// Some helper functions
bool isNan(double x)
static bool isNan(double x)
{
DoubleIntUnion u;
// tough to find a platform independent library function, do it directly
@@ -63,7 +63,7 @@ bool isNan(double x)
((bitPatternH & 0xFFFFF) != 0 || bitPatternL != 0);
}
bool isInf(double x)
static bool isInf(double x)
{
DoubleIntUnion u;
// tough to find a platform independent library function, do it directly
@@ -74,7 +74,9 @@ bool isInf(double x)
(bitPatternH & 0xFFFFF) == 0 && bitPatternL == 0;
}
const double pi = 3.1415926535897932384626433832795;
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
} // end anonymous namespace
@@ -545,10 +547,10 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i] = ~unionArray[i];
break;
case EOpRadians:
newConstArray[i].setDConst(unionArray[i].getDConst() * pi / 180.0);
newConstArray[i].setDConst(unionArray[i].getDConst() * M_PI / 180.0);
break;
case EOpDegrees:
newConstArray[i].setDConst(unionArray[i].getDConst() * 180.0 / pi);
newConstArray[i].setDConst(unionArray[i].getDConst() * 180.0 / M_PI);
break;
case EOpSin:
newConstArray[i].setDConst(sin(unionArray[i].getDConst()));
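
These hunks swap the file-local pi constant for the M_PI macro, with an #ifndef guard because the C++ standard does not require <cmath> to define M_PI (MSVC, for instance, only exposes it with _USE_MATH_DEFINES). A small standalone sketch of the guard plus the degree/radian conversions that the EOpRadians and EOpDegrees folds perform:

#include <cmath>
#include <cstdio>

// M_PI is a POSIX extension, not guaranteed by standard C++,
// so provide it when the platform headers do not.
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif

static double toRadians(double degrees) { return degrees * M_PI / 180.0; }
static double toDegrees(double radians) { return radians * 180.0 / M_PI; }

int main()
{
    // Prints roughly "3.141593 180.000000".
    std::printf("%f %f\n", toRadians(180.0), toDegrees(M_PI));
    return 0;
}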

View File

@@ -51,6 +51,52 @@
namespace glslang {
static __inline bool isTypeSignedInt(TBasicType type)
{
switch (type) {
case EbtInt8:
case EbtInt16:
case EbtInt:
case EbtInt64:
return true;
default:
return false;
}
}
static __inline bool isTypeInt(TBasicType type)
{
return isTypeSignedInt(type) || isTypeUnsignedInt(type);
}
static __inline int getTypeRank(TBasicType type)
{
int res = -1;
switch(type) {
case EbtInt8:
case EbtUint8:
res = 0;
break;
case EbtInt16:
case EbtUint16:
res = 1;
break;
case EbtInt:
case EbtUint:
res = 2;
break;
case EbtInt64:
case EbtUint64:
res = 3;
break;
default:
assert(false);
break;
}
return res;
}
////////////////////////////////////////////////////////////////////////////
//
// First set of functions are to help build the intermediate representation.
@@ -1636,7 +1682,6 @@ static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBas
}
}
static TBasicType getCorrespondingUnsignedType(TBasicType type) {
switch(type) {
case EbtInt8:

View File

@@ -2676,6 +2676,24 @@ void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& q
invariantCheck(loc, qualifier);
}
static __inline bool isTypeSignedInt(TBasicType type)
{
switch (type) {
case EbtInt8:
case EbtInt16:
case EbtInt:
case EbtInt64:
return true;
default:
return false;
}
}
static __inline bool isTypeInt(TBasicType type)
{
return isTypeSignedInt(type) || isTypeUnsignedInt(type);
}
//
// Check a full qualifier and type (no variable yet) at global level.
//

View File

@@ -150,38 +150,6 @@ const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;
# ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
# else
const size_t TAllocation::guardBlockSize = 0;
# endif
//
// Check a single guard block for damage
//
#ifdef GUARD_BLOCKS
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
#else
void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
#endif
{
#ifdef GUARD_BLOCKS
for (size_t x = 0; x < guardBlockSize; x++) {
if (blockMem[x] != val) {
const int maxSize = 80;
char assertMsg[maxSize];
// We don't print the assert message. It's here just to be helpful.
snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
locText, size, data());
assert(0 && "PoolAlloc: Damage in guard block");
}
}
#else
assert(guardBlockSize == 0);
#endif
}
void TPoolAllocator::push()
{
tAllocState state = { currentPageOffset, inUseList };
@@ -229,16 +197,6 @@ void TPoolAllocator::pop()
stack.pop_back();
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
while (stack.size() > 0)
pop();
}
void* TPoolAllocator::allocate(size_t numBytes)
{
// If we are using guard blocks, all allocations are bracketed by
@@ -312,13 +270,4 @@ void* TPoolAllocator::allocate(size_t numBytes)
return initializeAllocation(inUseList, ret, numBytes);
}
//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
alloc->check();
}
} // end namespace glslang
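
Along with the guard-block damage checks, this file loses popAll(), which simply called pop() until the allocation stack was empty. A loose, hypothetical sketch of that push/pop/popAll stack discipline (not the real TPoolAllocator, which tracks pages and in-use lists rather than a single marker):

#include <cstddef>
#include <vector>

class PoolStackSketch {
public:
    void push() { stack.push_back(marker); }

    void pop()
    {
        if (stack.empty())
            return;
        marker = stack.back();          // roll back to the saved state
        stack.pop_back();
    }

    // Equivalent of the deleted popAll(): unwind every outstanding push().
    void popAll()
    {
        while (!stack.empty())
            pop();
    }

    std::size_t depth() const { return stack.size(); }

private:
    std::size_t marker = 0;             // stand-in for currentPageOffset / inUseList
    std::vector<std::size_t> stack;     // stand-in for the tAllocStack
};

int main()
{
    PoolStackSketch pool;
    pool.push();
    pool.push();
    pool.popAll();                      // leaves the stack empty again
    return pool.depth() == 0 ? 0 : 1;
}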

View File

@@ -46,7 +46,7 @@
namespace {
bool IsInfinity(double x) {
static bool IsInfinity(double x) {
#ifdef _MSC_VER
switch (_fpclass(x)) {
case _FPCLASS_NINF:
@@ -60,15 +60,16 @@ bool IsInfinity(double x) {
#endif
}
bool IsNan(double x) {
static bool IsNan(double x) {
#ifdef _MSC_VER
switch (_fpclass(x)) {
case _FPCLASS_SNAN:
case _FPCLASS_QNAN:
return true;
default:
return false;
break;
}
return false;
#else
return std::isnan(x);
#endif
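
The last file gives IsInfinity and IsNan internal linkage as well; the _fpclass() switch only matters for old MSVC toolchains, and on every other compiler the helpers are thin wrappers over the standard library, as in this minimal portable sketch of that fallback path:

#include <cassert>
#include <cmath>
#include <limits>

// Portable equivalents of the non-MSVC branch shown above; the
// _fpclass()-based switch is only needed where std::isnan/std::isinf
// are unavailable.
static bool IsNan(double x)      { return std::isnan(x); }
static bool IsInfinity(double x) { return std::isinf(x); }

int main()
{
    assert(IsNan(std::numeric_limits<double>::quiet_NaN()));
    assert(IsInfinity(std::numeric_limits<double>::infinity()));
    assert(!IsNan(0.0) && !IsInfinity(0.0));
    return 0;
}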