This commit is contained in:
alexey.lysiuk 2018-03-10 17:13:00 +02:00
parent 94ef5c025e
commit 5d10826039
1043 changed files with 0 additions and 300555 deletions

Binary file not shown.


@@ -1,65 +0,0 @@
/*===-- llvm-c/Analysis.h - Analysis Library C Interface --------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMAnalysis.a, which *|
|* implements various analyses of the LLVM IR. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_ANALYSIS_H
#define LLVM_C_ANALYSIS_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCAnalysis Analysis
* @ingroup LLVMC
*
* @{
*/
typedef enum {
LLVMAbortProcessAction, /* verifier will print to stderr and abort() */
LLVMPrintMessageAction, /* verifier will print to stderr and return 1 */
LLVMReturnStatusAction /* verifier will just return 1 */
} LLVMVerifierFailureAction;
/* Verifies that a module is valid, taking the specified action if not.
Optionally returns a human-readable description of any invalid constructs.
OutMessage must be disposed with LLVMDisposeMessage. */
LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
char **OutMessage);
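
Editor's usage sketch (not part of the original header): verifying a module with the status-returning action. The helper name and module argument are illustrative; LLVMDisposeMessage comes from llvm-c/Core.h.

#include <stdio.h>
#include "llvm-c/Core.h"
#include "llvm-c/Analysis.h"

static int check_module(LLVMModuleRef mod) {
  char *error = NULL;
  /* LLVMReturnStatusAction: report problems via the return value instead
     of printing to stderr or aborting the process. */
  if (LLVMVerifyModule(mod, LLVMReturnStatusAction, &error)) {
    fprintf(stderr, "invalid module: %s\n", error);
    LLVMDisposeMessage(error);
    return 1;
  }
  /* A (possibly empty) message is allocated whenever OutMessage is
     non-NULL, so dispose it on the success path too. */
  LLVMDisposeMessage(error);
  return 0;
}
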
/* Verifies that a single function is valid, taking the specified action. Useful
for debugging. */
LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action);
/* Open up a ghostview window that displays the CFG of the current function.
Useful for debugging. */
void LLVMViewFunctionCFG(LLVMValueRef Fn);
void LLVMViewFunctionCFGOnly(LLVMValueRef Fn);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif


@@ -1,85 +0,0 @@
/*===-- llvm-c/BitReader.h - BitReader Library C Interface ------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMBitReader.a, which *|
|* implements input of the LLVM bitcode format. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_BITREADER_H
#define LLVM_C_BITREADER_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCBitReader Bit Reader
* @ingroup LLVMC
*
* @{
*/
/* Builds a module from the bitcode in the specified memory buffer, returning a
reference to the module via the OutModule parameter. Returns 0 on success.
Optionally returns a human-readable error message via OutMessage.
This is deprecated. Use LLVMParseBitcode2. */
LLVMBool LLVMParseBitcode(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutModule,
char **OutMessage);
/* Builds a module from the bitcode in the specified memory buffer, returning a
reference to the module via the OutModule parameter. Returns 0 on success. */
LLVMBool LLVMParseBitcode2(LLVMMemoryBufferRef MemBuf,
LLVMModuleRef *OutModule);
/* This is deprecated. Use LLVMParseBitcodeInContext2. */
LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
LLVMMemoryBufferRef MemBuf,
LLVMModuleRef *OutModule, char **OutMessage);
LLVMBool LLVMParseBitcodeInContext2(LLVMContextRef ContextRef,
LLVMMemoryBufferRef MemBuf,
LLVMModuleRef *OutModule);
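
Editor's usage sketch (not part of the original header): loading a module eagerly from a .bc file. The helper name and path are illustrative; LLVMCreateMemoryBufferWithContentsOfFile and LLVMDisposeMessage come from llvm-c/Core.h.

#include <stdio.h>
#include "llvm-c/Core.h"
#include "llvm-c/BitReader.h"

static LLVMModuleRef load_bitcode(LLVMContextRef ctx, const char *path) {
  LLVMMemoryBufferRef buf;
  LLVMModuleRef mod;
  char *error = NULL;
  if (LLVMCreateMemoryBufferWithContentsOfFile(path, &buf, &error)) {
    fprintf(stderr, "cannot read %s: %s\n", path, error);
    LLVMDisposeMessage(error);
    return NULL;
  }
  /* The "2" variant reports details through the context's diagnostic
     handler instead of an out-parameter. */
  LLVMBool failed = LLVMParseBitcodeInContext2(ctx, buf, &mod);
  LLVMDisposeMemoryBuffer(buf); /* eager parse: buffer no longer needed */
  return failed ? NULL : mod;
}
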
/** Reads a module from the given memory buffer, returning via the OutM
parameter a module which performs lazy deserialization. Returns 0 on success.
Optionally returns a human-readable error message via OutMessage.
This is deprecated. Use LLVMGetBitcodeModuleInContext2. */
LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef,
LLVMMemoryBufferRef MemBuf,
LLVMModuleRef *OutM, char **OutMessage);
/** Reads a module from the given memory buffer, returning via the OutM
* parameter a module which performs lazy deserialization. Returns 0 on success. */
LLVMBool LLVMGetBitcodeModuleInContext2(LLVMContextRef ContextRef,
LLVMMemoryBufferRef MemBuf,
LLVMModuleRef *OutM);
/* This is deprecated. Use LLVMGetBitcodeModule2. */
LLVMBool LLVMGetBitcodeModule(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM,
char **OutMessage);
LLVMBool LLVMGetBitcodeModule2(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM);
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif


@@ -1,59 +0,0 @@
/*===-- llvm-c/BitWriter.h - BitWriter Library C Interface ------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMBitWriter.a, which *|
|* implements output of the LLVM bitcode format. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_BITWRITER_H
#define LLVM_C_BITWRITER_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCBitWriter Bit Writer
* @ingroup LLVMC
*
* @{
*/
/*===-- Operations on modules ---------------------------------------------===*/
/** Writes a module to the specified path. Returns 0 on success. */
int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path);
/** Writes a module to an open file descriptor. Returns 0 on success. */
int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose,
int Unbuffered);
/** Deprecated for LLVMWriteBitcodeToFD. Writes a module to an open file
descriptor. Returns 0 on success. Closes the Handle. */
int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int Handle);
/** Writes a module to a new memory buffer and returns it. */
LLVMMemoryBufferRef LLVMWriteBitcodeToMemoryBuffer(LLVMModuleRef M);
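
Editor's usage sketch (not part of the original header): serializing a module to disk. Helper name and path are illustrative.

#include <stdio.h>
#include "llvm-c/BitWriter.h"

static int save_module(LLVMModuleRef mod, const char *path) {
  /* Non-zero return indicates the file could not be written. */
  if (LLVMWriteBitcodeToFile(mod, path) != 0) {
    fprintf(stderr, "failed to write %s\n", path);
    return 1;
  }
  return 0;
}
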
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif

File diff suppressed because it is too large.


@@ -1,258 +0,0 @@
/*===-- llvm-c/Disassembler.h - Disassembler Public C Interface ---*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header provides a public interface to a disassembler library. *|
|* LLVM provides an implementation of this interface. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_DISASSEMBLER_H
#define LLVM_C_DISASSEMBLER_H
#include "llvm/Support/DataTypes.h"
#ifdef __cplusplus
#include <cstddef>
#else
#include <stddef.h>
#endif
/**
* @defgroup LLVMCDisassembler Disassembler
* @ingroup LLVMC
*
* @{
*/
/**
* An opaque reference to a disassembler context.
*/
typedef void *LLVMDisasmContextRef;
/**
* The type for the operand information call back function. This is called to
* get the symbolic information for an operand of an instruction. Typically
* this is from the relocation information, symbol table, etc. That block of
* information is saved when the disassembler context is created and passed to
* the call back in the DisInfo parameter. The instruction containing operand
* is at the PC parameter. For some instruction sets, there can be more than
* one operand with symbolic information. To determine the symbolic operand
* information for each operand, the bytes for the specific operand in the
* instruction are specified by the Offset parameter and its byte widith is the
* size parameter. For instructions sets with fixed widths and one symbolic
* operand per instruction, the Offset parameter will be zero and Size parameter
* will be the instruction width. The information is returned in TagBuf and is
* Triple specific with its specific information defined by the value of
* TagType for that Triple. If symbolic information is returned the function
* returns 1, otherwise it returns 0.
*/
typedef int (*LLVMOpInfoCallback)(void *DisInfo, uint64_t PC,
uint64_t Offset, uint64_t Size,
int TagType, void *TagBuf);
/**
* The initial support in LLVM MC for the most general form of a relocatable
* expression is "AddSymbol - SubtractSymbol + Offset". For some Darwin targets
* this full form is encoded in the relocation information so that AddSymbol and
* SubtractSymbol can be link edited independent of each other. Many other
* platforms only allow a relocatable expression of the form AddSymbol + Offset
* to be encoded.
*
* The LLVMOpInfoCallback() for the TagType value of 1 uses the struct
* LLVMOpInfo1. The value of the relocatable expression for the operand,
* including any PC adjustment, is passed in to the call back in the Value
* field. The symbolic information about the operand is returned using all
* the fields of the structure with the Offset of the relocatable expression
* returned in the Value field. It is possible that some symbols in the
* relocatable expression were assembly temporary symbols, for example
* "Ldata - LpicBase + constant", and only the Values of the symbols without
* symbol names are present in the relocation information. The VariantKind
* type is one of the Target specific #defines below and is used to print
* operands like "_foo@GOT", ":lower16:_foo", etc.
*/
struct LLVMOpInfoSymbol1 {
uint64_t Present; /* 1 if this symbol is present */
const char *Name; /* symbol name if not NULL */
uint64_t Value; /* symbol value if name is NULL */
};
struct LLVMOpInfo1 {
struct LLVMOpInfoSymbol1 AddSymbol;
struct LLVMOpInfoSymbol1 SubtractSymbol;
uint64_t Value;
uint64_t VariantKind;
};
/**
* The operand VariantKinds for symbolic disassembly.
*/
#define LLVMDisassembler_VariantKind_None 0 /* all targets */
/**
* The ARM target VariantKinds.
*/
#define LLVMDisassembler_VariantKind_ARM_HI16 1 /* :upper16: */
#define LLVMDisassembler_VariantKind_ARM_LO16 2 /* :lower16: */
/**
* The ARM64 target VariantKinds.
*/
#define LLVMDisassembler_VariantKind_ARM64_PAGE 1 /* @page */
#define LLVMDisassembler_VariantKind_ARM64_PAGEOFF 2 /* @pageoff */
#define LLVMDisassembler_VariantKind_ARM64_GOTPAGE 3 /* @gotpage */
#define LLVMDisassembler_VariantKind_ARM64_GOTPAGEOFF 4 /* @gotpageoff */
#define LLVMDisassembler_VariantKind_ARM64_TLVP 5 /* @tlvppage */
#define LLVMDisassembler_VariantKind_ARM64_TLVOFF 6 /* @tlvppageoff */
/**
* The type for the symbol lookup function. This may be called by the
* disassembler for things like adding a comment for a PC plus a constant
* offset load instruction to use a symbol name instead of a load address value.
* It is passed the block of information that was saved when the disassembler
* context was created, and the ReferenceValue to look up as a symbol. If no
* symbol is found for the ReferenceValue, NULL is returned. The ReferenceType
* of the instruction is passed indirectly, as is the PC of the instruction in
* ReferencePC. If the output reference can be determined, its type is returned
* indirectly in ReferenceType, along with ReferenceName if any; otherwise
* ReferenceName is set to NULL.
*/
typedef const char *(*LLVMSymbolLookupCallback)(void *DisInfo,
uint64_t ReferenceValue,
uint64_t *ReferenceType,
uint64_t ReferencePC,
const char **ReferenceName);
/**
* The reference types on input and output.
*/
/* No input reference type or no output reference type. */
#define LLVMDisassembler_ReferenceType_InOut_None 0
/* The input reference is from a branch instruction. */
#define LLVMDisassembler_ReferenceType_In_Branch 1
/* The input reference is from a PC relative load instruction. */
#define LLVMDisassembler_ReferenceType_In_PCrel_Load 2
/* The input reference is from an ARM64::ADRP instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADRP 0x100000001
/* The input reference is from an ARM64::ADDXri instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADDXri 0x100000002
/* The input reference is from an ARM64::LDRXui instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXui 0x100000003
/* The input reference is from an ARM64::LDRXl instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXl 0x100000004
/* The input reference is from an ARM64::ADR instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADR 0x100000005
/* The output reference is to a symbol stub. */
#define LLVMDisassembler_ReferenceType_Out_SymbolStub 1
/* The output reference is to a symbol address in a literal pool. */
#define LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr 2
/* The output reference is to a cstring address in a literal pool. */
#define LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr 3
/* The output reference is to an Objective-C CoreFoundation string. */
#define LLVMDisassembler_ReferenceType_Out_Objc_CFString_Ref 4
/* The output reference is to an Objective-C message. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Message 5
/* The output reference is to an Objective-C message ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Message_Ref 6
/* The output reference is to an Objective-C selector ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Selector_Ref 7
/* The output reference is to an Objective-C class ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Class_Ref 8
/* The output reference is to a C++ symbol name. */
#define LLVMDisassembler_ReferenceType_DeMangled_Name 9
#ifdef __cplusplus
extern "C" {
#endif /* defined(__cplusplus) */
/**
* Create a disassembler for the TripleName. Symbolic disassembly is supported
* by passing a block of information in the DisInfo parameter and specifying the
* TagType and callback functions as described above. These can all be passed
* as NULL. If successful, this returns a disassembler context. If not, it
* returns NULL. This function is equivalent to calling
* LLVMCreateDisasmCPUFeatures() with an empty CPU name and feature set.
*/
LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
int TagType, LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp);
/**
* Create a disassembler for the TripleName and a specific CPU. Symbolic
* disassembly is supported by passing a block of information in the DisInfo
* parameter and specifying the TagType and callback functions as described
* above. These can all be passed as NULL. If successful, this returns a
* disassembler context. If not, it returns NULL. This function is equivalent
* to calling LLVMCreateDisasmCPUFeatures() with an empty feature set.
*/
LLVMDisasmContextRef LLVMCreateDisasmCPU(const char *Triple, const char *CPU,
void *DisInfo, int TagType,
LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp);
/**
* Create a disassembler for the TripleName, a specific CPU and specific feature
* string. Symbolic disassembly is supported by passing a block of information
* in the DisInfo parameter and specifying the TagType and callback functions as
* described above. These can all be passed as NULL. If successful, this
* returns a disassembler context. If not, it returns NULL.
*/
LLVMDisasmContextRef
LLVMCreateDisasmCPUFeatures(const char *Triple, const char *CPU,
const char *Features, void *DisInfo, int TagType,
LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp);
/**
* Set the disassembler's options. Returns 1 if it can set the Options and 0
* otherwise.
*/
int LLVMSetDisasmOptions(LLVMDisasmContextRef DC, uint64_t Options);
/* The option to produce marked up assembly. */
#define LLVMDisassembler_Option_UseMarkup 1
/* The option to print immediates as hex. */
#define LLVMDisassembler_Option_PrintImmHex 2
/* The option to use the other assembler printer variant. */
#define LLVMDisassembler_Option_AsmPrinterVariant 4
/* The option to set comments on instructions. */
#define LLVMDisassembler_Option_SetInstrComments 8
/* The option to print latency information alongside instructions. */
#define LLVMDisassembler_Option_PrintLatency 16
/**
* Dispose of a disassembler context.
*/
void LLVMDisasmDispose(LLVMDisasmContextRef DC);
/**
* Disassemble a single instruction using the disassembler context specified in
* the parameter DC. The bytes of the instruction are specified in the
* parameter Bytes, which must contain at least BytesSize bytes. The
* instruction is at the address specified by the PC parameter. If a valid
* instruction can be disassembled, its string is returned indirectly in
* OutString whose size is specified in the parameter OutStringSize. This
* function returns the number of bytes in the instruction or zero if there was
* no valid instruction.
*/
size_t LLVMDisasmInstruction(LLVMDisasmContextRef DC, uint8_t *Bytes,
uint64_t BytesSize, uint64_t PC,
char *OutString, size_t OutStringSize);
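
Editor's usage sketch (not part of the original header): disassembling a small x86-64 byte buffer. The triple, bytes, and buffer sizes are illustrative; the LLVMInitializeAll* helpers come from llvm-c/Target.h.

#include <stdio.h>
#include "llvm-c/Disassembler.h"
#include "llvm-c/Target.h"

int main(void) {
  /* x86-64: push rbp; mov rbp,rsp; ret */
  uint8_t bytes[] = { 0x55, 0x48, 0x89, 0xe5, 0xc3 };
  char insn[128];

  LLVMInitializeAllTargetInfos();
  LLVMInitializeAllTargetMCs();
  LLVMInitializeAllDisassemblers();

  /* No symbolic info: DisInfo, TagType and both callbacks may be NULL. */
  LLVMDisasmContextRef dc =
      LLVMCreateDisasm("x86_64-unknown-linux-gnu", NULL, 0, NULL, NULL);
  if (!dc) return 1;

  size_t pos = 0;
  while (pos < sizeof(bytes)) {
    size_t len = LLVMDisasmInstruction(dc, bytes + pos, sizeof(bytes) - pos,
                                       /*PC=*/pos, insn, sizeof(insn));
    if (len == 0) break; /* undecodable byte: stop */
    printf("0x%04zx:%s\n", pos, insn);
    pos += len;
  }
  LLVMDisasmDispose(dc);
  return 0;
}
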
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif /* LLVM_C_DISASSEMBLER_H */


@@ -1,49 +0,0 @@
/*===-- llvm-c/ErrorHandling.h - Error Handling C Interface -------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines the C interface to LLVM's error handling mechanism. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_ERROR_HANDLING_H
#define LLVM_C_ERROR_HANDLING_H
#ifdef __cplusplus
extern "C" {
#endif
typedef void (*LLVMFatalErrorHandler)(const char *Reason);
/**
* Install a fatal error handler. By default, if LLVM detects a fatal error, it
* will call exit(1). This may not be appropriate in many contexts. For example,
* doing exit(1) will bypass many crash reporting/tracing system tools. This
* function allows you to install a callback that will be invoked prior to the
* call to exit(1).
*/
void LLVMInstallFatalErrorHandler(LLVMFatalErrorHandler Handler);
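
Editor's usage sketch (not part of the original header): routing fatal errors through a custom handler. Handler and function names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include "llvm-c/ErrorHandling.h"

static void on_llvm_fatal(const char *reason) {
  /* Runs just before LLVM would otherwise call exit(1). */
  fprintf(stderr, "LLVM fatal error: %s\n", reason);
  fflush(NULL); /* give crash reporting/tracing tools a chance */
  exit(2);
}

void install_handlers(void) {
  LLVMInstallFatalErrorHandler(on_llvm_fatal);
  LLVMEnablePrettyStackTrace();
}
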
/**
* Reset the fatal error handler. This resets LLVM's fatal error handling
* behavior to the default.
*/
void LLVMResetFatalErrorHandler(void);
/**
* Enable LLVM's built-in stack trace code. This intercepts the OS's crash
* signals and prints which component of LLVM you were in at the time of the
* crash.
*/
void LLVMEnablePrettyStackTrace(void);
#ifdef __cplusplus
}
#endif
#endif


@@ -1,193 +0,0 @@
/*===-- llvm-c/ExecutionEngine.h - ExecutionEngine Lib C Iface --*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMExecutionEngine.a, which *|
|* implements JIT compilation and execution of LLVM IR. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_EXECUTIONENGINE_H
#define LLVM_C_EXECUTIONENGINE_H
#include "llvm-c/Types.h"
#include "llvm-c/Target.h"
#include "llvm-c/TargetMachine.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCExecutionEngine Execution Engine
* @ingroup LLVMC
*
* @{
*/
void LLVMLinkInMCJIT(void);
void LLVMLinkInInterpreter(void);
typedef struct LLVMOpaqueGenericValue *LLVMGenericValueRef;
typedef struct LLVMOpaqueExecutionEngine *LLVMExecutionEngineRef;
typedef struct LLVMOpaqueMCJITMemoryManager *LLVMMCJITMemoryManagerRef;
struct LLVMMCJITCompilerOptions {
unsigned OptLevel;
LLVMCodeModel CodeModel;
LLVMBool NoFramePointerElim;
LLVMBool EnableFastISel;
LLVMMCJITMemoryManagerRef MCJMM;
};
/*===-- Operations on generic values --------------------------------------===*/
LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
unsigned long long N,
LLVMBool IsSigned);
LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P);
LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef Ty, double N);
unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef);
unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenVal,
LLVMBool IsSigned);
void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal);
double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal);
void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal);
/*===-- Operations on execution engines -----------------------------------===*/
LLVMBool LLVMCreateExecutionEngineForModule(LLVMExecutionEngineRef *OutEE,
LLVMModuleRef M,
char **OutError);
LLVMBool LLVMCreateInterpreterForModule(LLVMExecutionEngineRef *OutInterp,
LLVMModuleRef M,
char **OutError);
LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT,
LLVMModuleRef M,
unsigned OptLevel,
char **OutError);
void LLVMInitializeMCJITCompilerOptions(
struct LLVMMCJITCompilerOptions *Options, size_t SizeOfOptions);
/**
* Create an MCJIT execution engine for a module, with the given options. It is
* the responsibility of the caller to ensure that all fields in Options up to
* the given SizeOfOptions are initialized. It is correct to pass a smaller
* value of SizeOfOptions that omits some fields. The canonical way of using
* this is:
*
* LLVMMCJITCompilerOptions options;
* LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
* ... fill in those options you care about
* LLVMCreateMCJITCompilerForModule(&jit, mod, &options, sizeof(options),
* &error);
*
* Note that this is also correct, though possibly suboptimal:
*
* LLVMCreateMCJITCompilerForModule(&jit, mod, 0, 0, &error);
*/
LLVMBool LLVMCreateMCJITCompilerForModule(
LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M,
struct LLVMMCJITCompilerOptions *Options, size_t SizeOfOptions,
char **OutError);
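
Editor's sketch of the canonical pattern above, with error handling added (not part of the original header; helper name is illustrative). The native-target initializers come from llvm-c/Target.h, which this header includes; LLVMDisposeMessage comes from llvm-c/Core.h.

#include <stdio.h>
#include "llvm-c/Core.h"
#include "llvm-c/ExecutionEngine.h"

static LLVMExecutionEngineRef make_jit(LLVMModuleRef mod) {
  struct LLVMMCJITCompilerOptions opts;
  LLVMExecutionEngineRef jit;
  char *error = NULL;

  LLVMLinkInMCJIT();            /* pull in the MCJIT implementation */
  LLVMInitializeNativeTarget();
  LLVMInitializeNativeAsmPrinter();

  LLVMInitializeMCJITCompilerOptions(&opts, sizeof(opts));
  opts.OptLevel = 2;            /* fill in only the fields we care about */

  /* On success the engine takes ownership of mod. */
  if (LLVMCreateMCJITCompilerForModule(&jit, mod, &opts, sizeof(opts),
                                       &error)) {
    fprintf(stderr, "MCJIT creation failed: %s\n", error);
    LLVMDisposeMessage(error);
    return NULL;
  }
  return jit;
}
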
void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE);
void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE);
void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE);
int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
unsigned ArgC, const char * const *ArgV,
const char * const *EnvP);
LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
unsigned NumArgs,
LLVMGenericValueRef *Args);
void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F);
void LLVMAddModule(LLVMExecutionEngineRef EE, LLVMModuleRef M);
LLVMBool LLVMRemoveModule(LLVMExecutionEngineRef EE, LLVMModuleRef M,
LLVMModuleRef *OutMod, char **OutError);
LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
LLVMValueRef *OutFn);
void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE,
LLVMValueRef Fn);
LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE);
LLVMTargetMachineRef
LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE);
void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
void* Addr);
void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global);
uint64_t LLVMGetGlobalValueAddress(LLVMExecutionEngineRef EE, const char *Name);
uint64_t LLVMGetFunctionAddress(LLVMExecutionEngineRef EE, const char *Name);
/*===-- Operations on memory managers -------------------------------------===*/
typedef uint8_t *(*LLVMMemoryManagerAllocateCodeSectionCallback)(
void *Opaque, uintptr_t Size, unsigned Alignment, unsigned SectionID,
const char *SectionName);
typedef uint8_t *(*LLVMMemoryManagerAllocateDataSectionCallback)(
void *Opaque, uintptr_t Size, unsigned Alignment, unsigned SectionID,
const char *SectionName, LLVMBool IsReadOnly);
typedef LLVMBool (*LLVMMemoryManagerFinalizeMemoryCallback)(
void *Opaque, char **ErrMsg);
typedef void (*LLVMMemoryManagerDestroyCallback)(void *Opaque);
/**
* Create a simple custom MCJIT memory manager. This memory manager can
* intercept allocations in a module-oblivious way. This will return NULL
* if any of the passed functions are NULL.
*
* @param Opaque An opaque client object to pass back to the callbacks.
* @param AllocateCodeSection Allocate a block of memory for executable code.
* @param AllocateDataSection Allocate a block of memory for data.
* @param FinalizeMemory Set page permissions and flush cache. Return 0 on
* success, 1 on error.
*/
LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
void *Opaque,
LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
LLVMMemoryManagerDestroyCallback Destroy);
void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM);
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif


@@ -1,40 +0,0 @@
/*===-- llvm-c/IRReader.h - IR Reader C Interface -----------------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines the C interface to the IR Reader. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_IRREADER_H
#define LLVM_C_IRREADER_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Read LLVM IR from a memory buffer and convert it into an in-memory Module
* object. Returns 0 on success.
* Optionally returns a human-readable description of any errors that
* occurred during parsing IR. OutMessage must be disposed with
* LLVMDisposeMessage.
*
* @see llvm::ParseIR()
*/
LLVMBool LLVMParseIRInContext(LLVMContextRef ContextRef,
LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM,
char **OutMessage);
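
Editor's usage sketch (not part of the original header): parsing textual IR held in a string. The helper name and IR snippet are illustrative; the buffer constructor and LLVMDisposeMessage come from llvm-c/Core.h.

#include <stdio.h>
#include <string.h>
#include "llvm-c/Core.h"
#include "llvm-c/IRReader.h"

static LLVMModuleRef parse_ir(LLVMContextRef ctx, const char *ir) {
  /* e.g. ir = "define i32 @answer() {\n  ret i32 42\n}\n" */
  LLVMMemoryBufferRef buf = LLVMCreateMemoryBufferWithMemoryRangeCopy(
      ir, strlen(ir), "inline-ir");
  LLVMModuleRef mod;
  char *error = NULL;
  /* LLVMParseIRInContext consumes the buffer, even on failure. */
  if (LLVMParseIRInContext(ctx, buf, &mod, &error)) {
    fprintf(stderr, "IR parse error: %s\n", error);
    LLVMDisposeMessage(error);
    return NULL;
  }
  return mod;
}
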
#ifdef __cplusplus
}
#endif
#endif


@@ -1,55 +0,0 @@
/*===-- llvm-c/Initialization.h - Initialization C Interface ------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to LLVM initialization routines, *|
|* which must be called before you can use the functionality provided by *|
|* the corresponding LLVM library. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_INITIALIZATION_H
#define LLVM_C_INITIALIZATION_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCInitialization Initialization Routines
* @ingroup LLVMC
*
* This module contains routines used to initialize the LLVM system.
*
* @{
*/
void LLVMInitializeCore(LLVMPassRegistryRef R);
void LLVMInitializeTransformUtils(LLVMPassRegistryRef R);
void LLVMInitializeScalarOpts(LLVMPassRegistryRef R);
void LLVMInitializeObjCARCOpts(LLVMPassRegistryRef R);
void LLVMInitializeVectorization(LLVMPassRegistryRef R);
void LLVMInitializeInstCombine(LLVMPassRegistryRef R);
void LLVMInitializeIPO(LLVMPassRegistryRef R);
void LLVMInitializeInstrumentation(LLVMPassRegistryRef R);
void LLVMInitializeAnalysis(LLVMPassRegistryRef R);
void LLVMInitializeIPA(LLVMPassRegistryRef R);
void LLVMInitializeCodeGen(LLVMPassRegistryRef R);
void LLVMInitializeTarget(LLVMPassRegistryRef R);
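
Editor's usage sketch (not part of the original header): a typical once-at-startup initialization. LLVMGetGlobalPassRegistry comes from llvm-c/Core.h; the subset of components initialized here is illustrative.

#include "llvm-c/Core.h"
#include "llvm-c/Initialization.h"

void init_llvm_passes(void) {
  LLVMPassRegistryRef reg = LLVMGetGlobalPassRegistry();
  LLVMInitializeCore(reg);
  LLVMInitializeAnalysis(reg);
  LLVMInitializeTransformUtils(reg);
  LLVMInitializeScalarOpts(reg);
  LLVMInitializeCodeGen(reg);
  LLVMInitializeTarget(reg);
}
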
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif


@@ -1,69 +0,0 @@
//===-- llvm/LinkTimeOptimizer.h - LTO Public C Interface -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This header provides a C API to use the LLVM link time optimization
// library. This is intended to be used by linkers which are C-only in
// their implementation for performing LTO.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_C_LINKTIMEOPTIMIZER_H
#define LLVM_C_LINKTIMEOPTIMIZER_H
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCLinkTimeOptimizer Link Time Optimization
* @ingroup LLVMC
*
* @{
*/
/// This provides a dummy type for pointers to the LTO object.
typedef void* llvm_lto_t;
/// This provides a C-visible enumerator to manage status codes.
/// This should map exactly onto the C++ enumerator LTOStatus.
typedef enum llvm_lto_status {
LLVM_LTO_UNKNOWN,
LLVM_LTO_OPT_SUCCESS,
LLVM_LTO_READ_SUCCESS,
LLVM_LTO_READ_FAILURE,
LLVM_LTO_WRITE_FAILURE,
LLVM_LTO_NO_TARGET,
LLVM_LTO_NO_WORK,
LLVM_LTO_MODULE_MERGE_FAILURE,
LLVM_LTO_ASM_FAILURE,
// Added C-specific error codes
LLVM_LTO_NULL_OBJECT
} llvm_lto_status_t;
/// This provides a C interface to initialize the link time optimizer. It
/// allows a linker to load the LinkTimeOptimizer dynamically via dlopen().
/// extern "C" helps, because the dlopen() interface looks symbols up by name.
extern llvm_lto_t llvm_create_optimizer(void);
extern void llvm_destroy_optimizer(llvm_lto_t lto);
extern llvm_lto_status_t llvm_read_object_file
(llvm_lto_t lto, const char* input_filename);
extern llvm_lto_status_t llvm_optimize_modules
(llvm_lto_t lto, const char* output_filename);
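
Editor's usage sketch (not part of the original header): the read-then-optimize flow a C-only linker would drive. Filenames are illustrative.

#include <stdio.h>
#include "llvm-c/LinkTimeOptimizer.h"

int lto_demo(void) {
  llvm_lto_t lto = llvm_create_optimizer();
  if (!lto) return 1;
  if (llvm_read_object_file(lto, "input.o") != LLVM_LTO_READ_SUCCESS ||
      llvm_optimize_modules(lto, "output.o") != LLVM_LTO_OPT_SUCCESS) {
    fprintf(stderr, "LTO failed\n");
    llvm_destroy_optimizer(lto);
    return 1;
  }
  llvm_destroy_optimizer(lto);
  return 0;
}
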
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif


@@ -1,41 +0,0 @@
/*===-- llvm-c/Linker.h - Module Linker C Interface -------------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines the C interface to the module/file/archive linker. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_LINKER_H
#define LLVM_C_LINKER_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/* This enum is provided for backwards-compatibility only. It has no effect. */
typedef enum {
LLVMLinkerDestroySource = 0, /* This is the default behavior. */
LLVMLinkerPreserveSource_Removed = 1 /* This option has been deprecated and
should not be used. */
} LLVMLinkerMode;
/* Links the source module into the destination module. The source module is
* destroyed.
* The return value is true if an error occurred, false otherwise.
* Use the diagnostic handler to get any diagnostic message.
*/
LLVMBool LLVMLinkModules2(LLVMModuleRef Dest, LLVMModuleRef Src);
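
Editor's usage sketch (not part of the original header): merging one module into another. Helper name is illustrative.

#include <stdio.h>
#include "llvm-c/Linker.h"

static int merge_modules(LLVMModuleRef dest, LLVMModuleRef src) {
  /* src is consumed by the call; do not touch it afterwards. A true
     return value means an error occurred (details go to the context's
     diagnostic handler). */
  if (LLVMLinkModules2(dest, src)) {
    fprintf(stderr, "module link failed\n");
    return 1;
  }
  return 0;
}
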
#ifdef __cplusplus
}
#endif
#endif


@@ -1,100 +0,0 @@
/*===-- llvm-c/Object.h - Object Lib C Iface --------------------*- C++ -*-===*/
/* */
/* The LLVM Compiler Infrastructure */
/* */
/* This file is distributed under the University of Illinois Open Source */
/* License. See LICENSE.TXT for details. */
/* */
/*===----------------------------------------------------------------------===*/
/* */
/* This header declares the C interface to libLLVMObject.a, which */
/* implements object file reading and writing. */
/* */
/* Many exotic languages can interoperate with C code but have a harder time */
/* with C++ due to name mangling. So in addition to C, this interface enables */
/* tools written in such languages. */
/* */
/*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_OBJECT_H
#define LLVM_C_OBJECT_H
#include "llvm-c/Types.h"
#include "llvm/Config/llvm-config.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCObject Object file reading and writing
* @ingroup LLVMC
*
* @{
*/
// Opaque type wrappers
typedef struct LLVMOpaqueObjectFile *LLVMObjectFileRef;
typedef struct LLVMOpaqueSectionIterator *LLVMSectionIteratorRef;
typedef struct LLVMOpaqueSymbolIterator *LLVMSymbolIteratorRef;
typedef struct LLVMOpaqueRelocationIterator *LLVMRelocationIteratorRef;
// ObjectFile creation
LLVMObjectFileRef LLVMCreateObjectFile(LLVMMemoryBufferRef MemBuf);
void LLVMDisposeObjectFile(LLVMObjectFileRef ObjectFile);
// ObjectFile Section iterators
LLVMSectionIteratorRef LLVMGetSections(LLVMObjectFileRef ObjectFile);
void LLVMDisposeSectionIterator(LLVMSectionIteratorRef SI);
LLVMBool LLVMIsSectionIteratorAtEnd(LLVMObjectFileRef ObjectFile,
LLVMSectionIteratorRef SI);
void LLVMMoveToNextSection(LLVMSectionIteratorRef SI);
void LLVMMoveToContainingSection(LLVMSectionIteratorRef Sect,
LLVMSymbolIteratorRef Sym);
// ObjectFile Symbol iterators
LLVMSymbolIteratorRef LLVMGetSymbols(LLVMObjectFileRef ObjectFile);
void LLVMDisposeSymbolIterator(LLVMSymbolIteratorRef SI);
LLVMBool LLVMIsSymbolIteratorAtEnd(LLVMObjectFileRef ObjectFile,
LLVMSymbolIteratorRef SI);
void LLVMMoveToNextSymbol(LLVMSymbolIteratorRef SI);
// SectionRef accessors
const char *LLVMGetSectionName(LLVMSectionIteratorRef SI);
uint64_t LLVMGetSectionSize(LLVMSectionIteratorRef SI);
const char *LLVMGetSectionContents(LLVMSectionIteratorRef SI);
uint64_t LLVMGetSectionAddress(LLVMSectionIteratorRef SI);
LLVMBool LLVMGetSectionContainsSymbol(LLVMSectionIteratorRef SI,
LLVMSymbolIteratorRef Sym);
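
Editor's usage sketch (not part of the original header): listing the sections of an object file already loaded into a memory buffer. Helper name is illustrative.

#include <stdio.h>
#include "llvm-c/Object.h"

static void dump_sections(LLVMMemoryBufferRef buf) {
  LLVMObjectFileRef obj = LLVMCreateObjectFile(buf); /* consumes buf */
  if (!obj) return;
  LLVMSectionIteratorRef it = LLVMGetSections(obj);
  while (!LLVMIsSectionIteratorAtEnd(obj, it)) {
    printf("%-24s %8llu bytes @ 0x%llx\n",
           LLVMGetSectionName(it),
           (unsigned long long)LLVMGetSectionSize(it),
           (unsigned long long)LLVMGetSectionAddress(it));
    LLVMMoveToNextSection(it);
  }
  LLVMDisposeSectionIterator(it);
  LLVMDisposeObjectFile(obj); /* also releases the underlying buffer */
}
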
// Section Relocation iterators
LLVMRelocationIteratorRef LLVMGetRelocations(LLVMSectionIteratorRef Section);
void LLVMDisposeRelocationIterator(LLVMRelocationIteratorRef RI);
LLVMBool LLVMIsRelocationIteratorAtEnd(LLVMSectionIteratorRef Section,
LLVMRelocationIteratorRef RI);
void LLVMMoveToNextRelocation(LLVMRelocationIteratorRef RI);
// SymbolRef accessors
const char *LLVMGetSymbolName(LLVMSymbolIteratorRef SI);
uint64_t LLVMGetSymbolAddress(LLVMSymbolIteratorRef SI);
uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI);
// RelocationRef accessors
uint64_t LLVMGetRelocationOffset(LLVMRelocationIteratorRef RI);
LLVMSymbolIteratorRef LLVMGetRelocationSymbol(LLVMRelocationIteratorRef RI);
uint64_t LLVMGetRelocationType(LLVMRelocationIteratorRef RI);
// NOTE: The caller takes ownership of the strings returned by the two
// following functions.
const char *LLVMGetRelocationTypeName(LLVMRelocationIteratorRef RI);
const char *LLVMGetRelocationValueString(LLVMRelocationIteratorRef RI);
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif


@@ -1,141 +0,0 @@
/*===----------- llvm-c/OrcBindings.h - Orc Lib C Iface ---------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMOrcJIT.a, which implements *|
|* JIT compilation of LLVM IR. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
|* Note: This interface is experimental. It is *NOT* stable, and may be *|
|* changed without warning. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_ORCBINDINGS_H
#define LLVM_C_ORCBINDINGS_H
#include "llvm-c/Object.h"
#include "llvm-c/TargetMachine.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct LLVMOrcOpaqueJITStack *LLVMOrcJITStackRef;
typedef uint32_t LLVMOrcModuleHandle;
typedef uint64_t LLVMOrcTargetAddress;
typedef uint64_t (*LLVMOrcSymbolResolverFn)(const char *Name, void *LookupCtx);
typedef uint64_t (*LLVMOrcLazyCompileCallbackFn)(LLVMOrcJITStackRef JITStack,
void *CallbackCtx);
typedef enum { LLVMOrcErrSuccess = 0, LLVMOrcErrGeneric } LLVMOrcErrorCode;
/**
* Create an ORC JIT stack.
*
* The client owns the resulting stack, and must call LLVMOrcDisposeInstance()
* to destroy it and free its memory. The JIT stack takes ownership of the
* TargetMachine, which will be destroyed when the stack is destroyed. The
* client should not attempt to dispose of the TargetMachine itself, or a
* double-free will result.
*/
LLVMOrcJITStackRef LLVMOrcCreateInstance(LLVMTargetMachineRef TM);
/**
* Get the error message for the most recent error (if any).
*
* This message is owned by the ORC JIT Stack and will be freed when the stack
* is disposed of by LLVMOrcDisposeInstance.
*/
const char *LLVMOrcGetErrorMsg(LLVMOrcJITStackRef JITStack);
/**
* Mangle the given symbol.
* Memory will be allocated for MangledSymbol to hold the result. The client
* is responsible for freeing it by calling LLVMOrcDisposeMangledSymbol().
*/
void LLVMOrcGetMangledSymbol(LLVMOrcJITStackRef JITStack, char **MangledSymbol,
const char *Symbol);
/**
* Dispose of a mangled symbol.
*/
void LLVMOrcDisposeMangledSymbol(char *MangledSymbol);
/**
* Create a lazy compile callback.
*/
LLVMOrcTargetAddress
LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
LLVMOrcLazyCompileCallbackFn Callback,
void *CallbackCtx);
/**
* Create a named indirect call stub.
*/
LLVMOrcErrorCode LLVMOrcCreateIndirectStub(LLVMOrcJITStackRef JITStack,
const char *StubName,
LLVMOrcTargetAddress InitAddr);
/**
* Set the pointer for the given indirect stub.
*/
LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
const char *StubName,
LLVMOrcTargetAddress NewAddr);
/**
* Add module to be eagerly compiled.
*/
LLVMOrcModuleHandle
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
/**
* Add module to be lazily compiled one function at a time.
*/
LLVMOrcModuleHandle
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
/**
* Add an object file.
*/
LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
LLVMObjectFileRef Obj,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
/**
* Remove a module set from the JIT.
*
* This works for all modules that can be added via LLVMOrcAdd*, including
* object files.
*/
void LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack, LLVMOrcModuleHandle H);
/**
* Get symbol address from JIT instance.
*/
LLVMOrcTargetAddress LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
const char *SymbolName);
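
Editor's usage sketch (not part of the original header; the API is experimental, as noted above). Assumes the native target and asm printer are initialized and tm is a TargetMachine for the host; the resolver and symbol name are illustrative.

#include <stdio.h>
#include "llvm-c/OrcBindings.h"

static uint64_t resolver(const char *name, void *ctx) {
  (void)name; (void)ctx;
  return 0; /* toy example: no external symbols to resolve */
}

static void orc_demo(LLVMTargetMachineRef tm, LLVMModuleRef mod) {
  LLVMOrcJITStackRef jit = LLVMOrcCreateInstance(tm); /* jit now owns tm */
  LLVMOrcAddEagerlyCompiledIR(jit, mod, resolver, NULL);
  char *mangled = NULL;
  LLVMOrcGetMangledSymbol(jit, &mangled, "main");
  LLVMOrcTargetAddress addr = LLVMOrcGetSymbolAddress(jit, mangled);
  printf("main compiled to 0x%llx\n", (unsigned long long)addr);
  LLVMOrcDisposeMangledSymbol(mangled);
  LLVMOrcDisposeInstance(jit); /* JITed code is invalid after this */
}
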
/**
* Dispose of an ORC JIT stack.
*/
void LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
#ifdef __cplusplus
}
#endif /* extern "C" */
#endif /* LLVM_C_ORCBINDINGS_H */


@@ -1,65 +0,0 @@
/*===-- llvm-c/Support.h - Support C Interface --------------------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines the C interface to the LLVM support library. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_SUPPORT_H
#define LLVM_C_SUPPORT_H
#include "llvm/Support/DataTypes.h"
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* This function permanently loads the dynamic library at the given path.
* It is safe to call this function multiple times for the same library.
*
* @see sys::DynamicLibrary::LoadLibraryPermanently()
*/
LLVMBool LLVMLoadLibraryPermanently(const char* Filename);
/**
* This function parses the given arguments using the LLVM command line parser.
* Note that the only stable thing about this function is its signature; you
* cannot rely on any particular set of command line arguments being interpreted
* the same way across LLVM versions.
*
* @see llvm::cl::ParseCommandLineOptions()
*/
void LLVMParseCommandLineOptions(int argc, const char *const *argv,
const char *Overview);
/**
* This function will search through all previously loaded dynamic
* libraries for the symbol \p symbolName. If it is found, the address of
* that symbol is returned. If not, null is returned.
*
* @see sys::DynamicLibrary::SearchForAddressOfSymbol()
*/
void *LLVMSearchForAddressOfSymbol(const char *symbolName);
/**
* This function permanently adds the symbol \p symbolName with the
* value \p symbolValue. These symbols are searched before any
* libraries.
*
* @see sys::DynamicLibrary::AddSymbol()
*/
void LLVMAddSymbol(const char *symbolName, void *symbolValue);
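
Editor's usage sketch (not part of the original header): pre-registering a host function so JITed code can resolve it. Names are illustrative.

#include <stdio.h>
#include "llvm-c/Support.h"

static void host_log(const char *msg) { fprintf(stderr, "[jit] %s\n", msg); }

void register_runtime(void) {
  /* Symbols added here are searched before any loaded library. */
  LLVMAddSymbol("host_log", (void *)host_log);
  /* Later, e.g. from a JIT symbol resolver: */
  void *addr = LLVMSearchForAddressOfSymbol("host_log");
  (void)addr; /* NULL if the symbol were unknown */
}
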
#ifdef __cplusplus
}
#endif
#endif


@@ -1,299 +0,0 @@
/*===-- llvm-c/Target.h - Target Lib C Iface --------------------*- C++ -*-===*/
/* */
/* The LLVM Compiler Infrastructure */
/* */
/* This file is distributed under the University of Illinois Open Source */
/* License. See LICENSE.TXT for details. */
/* */
/*===----------------------------------------------------------------------===*/
/* */
/* This header declares the C interface to libLLVMTarget.a, which */
/* implements target information. */
/* */
/* Many exotic languages can interoperate with C code but have a harder time */
/* with C++ due to name mangling. So in addition to C, this interface enables */
/* tools written in such languages. */
/* */
/*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TARGET_H
#define LLVM_C_TARGET_H
#include "llvm-c/Types.h"
#include "llvm/Config/llvm-config.h"
#if defined(_MSC_VER) && !defined(inline)
#define inline __inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCTarget Target information
* @ingroup LLVMC
*
* @{
*/
enum LLVMByteOrdering { LLVMBigEndian, LLVMLittleEndian };
typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
typedef struct LLVMOpaqueTargetLibraryInfotData *LLVMTargetLibraryInfoRef;
/* Declare all of the target-initialization functions that are available. */
#define LLVM_TARGET(TargetName) \
void LLVMInitialize##TargetName##TargetInfo(void);
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target(void);
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
#define LLVM_TARGET(TargetName) \
void LLVMInitialize##TargetName##TargetMC(void);
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
/* Declare all of the available assembly printer initialization functions. */
#define LLVM_ASM_PRINTER(TargetName) \
void LLVMInitialize##TargetName##AsmPrinter(void);
#include "llvm/Config/AsmPrinters.def"
#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
/* Declare all of the available assembly parser initialization functions. */
#define LLVM_ASM_PARSER(TargetName) \
void LLVMInitialize##TargetName##AsmParser(void);
#include "llvm/Config/AsmParsers.def"
#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
/* Declare all of the available disassembler initialization functions. */
#define LLVM_DISASSEMBLER(TargetName) \
void LLVMInitialize##TargetName##Disassembler(void);
#include "llvm/Config/Disassemblers.def"
#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
/** LLVMInitializeAllTargetInfos - The main program should call this function if
it wants access to all available targets that LLVM is configured to
support. */
static inline void LLVMInitializeAllTargetInfos(void) {
#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetInfo();
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
}
/** LLVMInitializeAllTargets - The main program should call this function if it
wants to link in all available targets that LLVM is configured to
support. */
static inline void LLVMInitializeAllTargets(void) {
#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##Target();
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
}
/** LLVMInitializeAllTargetMCs - The main program should call this function if
it wants access to all available target MC that LLVM is configured to
support. */
static inline void LLVMInitializeAllTargetMCs(void) {
#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetMC();
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
}
/** LLVMInitializeAllAsmPrinters - The main program should call this function if
it wants all asm printers that LLVM is configured to support, to make them
available via the TargetRegistry. */
static inline void LLVMInitializeAllAsmPrinters(void) {
#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"
#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
}
/** LLVMInitializeAllAsmParsers - The main program should call this function if
it wants all asm parsers that LLVM is configured to support, to make them
available via the TargetRegistry. */
static inline void LLVMInitializeAllAsmParsers(void) {
#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
#include "llvm/Config/AsmParsers.def"
#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
}
/** LLVMInitializeAllDisassemblers - The main program should call this function
if it wants all disassemblers that LLVM is configured to support, to make
them available via the TargetRegistry. */
static inline void LLVMInitializeAllDisassemblers(void) {
#define LLVM_DISASSEMBLER(TargetName) \
LLVMInitialize##TargetName##Disassembler();
#include "llvm/Config/Disassemblers.def"
#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
}
/** LLVMInitializeNativeTarget - The main program should call this function to
initialize the native target corresponding to the host. This is useful
for JIT applications to ensure that the target gets linked in correctly. */
static inline LLVMBool LLVMInitializeNativeTarget(void) {
/* If we have a native target, initialize it to ensure it is linked in. */
#ifdef LLVM_NATIVE_TARGET
LLVM_NATIVE_TARGETINFO();
LLVM_NATIVE_TARGET();
LLVM_NATIVE_TARGETMC();
return 0;
#else
return 1;
#endif
}
/** LLVMInitializeNativeAsmParser - The main program should call this
function to initialize the parser for the native target corresponding to the
host. */
static inline LLVMBool LLVMInitializeNativeAsmParser(void) {
#ifdef LLVM_NATIVE_ASMPARSER
LLVM_NATIVE_ASMPARSER();
return 0;
#else
return 1;
#endif
}
/** LLVMInitializeNativeAsmPrinter - The main program should call this
function to initialize the printer for the native target corresponding to
the host. */
static inline LLVMBool LLVMInitializeNativeAsmPrinter(void) {
#ifdef LLVM_NATIVE_ASMPRINTER
LLVM_NATIVE_ASMPRINTER();
return 0;
#else
return 1;
#endif
}
/** LLVMInitializeNativeDisassembler - The main program should call this
function to initialize the disassembler for the native target corresponding
to the host. */
static inline LLVMBool LLVMInitializeNativeDisassembler(void) {
#ifdef LLVM_NATIVE_DISASSEMBLER
LLVM_NATIVE_DISASSEMBLER();
return 0;
#else
return 1;
#endif
}
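
Editor's usage sketch (not part of the original header): the usual JIT startup sequence on the host. Each initializer returns 0 on success and 1 if the component was not configured into this build.

#include <stdio.h>
#include "llvm-c/Target.h"

int init_native(void) {
  if (LLVMInitializeNativeTarget() ||      /* info + target + MC */
      LLVMInitializeNativeAsmPrinter()) {  /* needed to emit code */
    fprintf(stderr, "native target not available in this build\n");
    return 1;
  }
  return 0;
}
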
/*===-- Target Data -------------------------------------------------------===*/
/**
* Obtain the data layout for a module.
*
* @see Module::getDataLayout()
*/
LLVMTargetDataRef LLVMGetModuleDataLayout(LLVMModuleRef M);
/**
* Set the data layout for a module.
*
* @see Module::setDataLayout()
*/
void LLVMSetModuleDataLayout(LLVMModuleRef M, LLVMTargetDataRef DL);
/** Creates target data from a target layout string.
See the constructor llvm::DataLayout::DataLayout. */
LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep);
/** Deallocates a TargetData.
See the destructor llvm::DataLayout::~DataLayout. */
void LLVMDisposeTargetData(LLVMTargetDataRef TD);
/** Adds target library information to a pass manager. This does not take
ownership of the target library info.
See the method llvm::PassManagerBase::add. */
void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
LLVMPassManagerRef PM);
/** Converts target data to a target layout string. The string must be disposed
with LLVMDisposeMessage.
See the constructor llvm::DataLayout::DataLayout. */
char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef TD);
/** Returns the byte order of a target, either LLVMBigEndian or
LLVMLittleEndian.
See the method llvm::DataLayout::isLittleEndian. */
enum LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef TD);
/** Returns the pointer size in bytes for a target.
See the method llvm::DataLayout::getPointerSize. */
unsigned LLVMPointerSize(LLVMTargetDataRef TD);
/** Returns the pointer size in bytes for a target for a specified
address space.
See the method llvm::DataLayout::getPointerSize. */
unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS);
/** Returns the integer type that is the same size as a pointer on a target.
See the method llvm::DataLayout::getIntPtrType. */
LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD);
/** Returns the integer type that is the same size as a pointer on a target.
This version allows the address space to be specified.
See the method llvm::DataLayout::getIntPtrType. */
LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS);
/** Returns the integer type that is the same size as a pointer on a target.
See the method llvm::DataLayout::getIntPtrType. */
LLVMTypeRef LLVMIntPtrTypeInContext(LLVMContextRef C, LLVMTargetDataRef TD);
/** Returns the integer type that is the same size as a pointer on a target.
This version allows the address space to be specified.
See the method llvm::DataLayout::getIntPtrType. */
LLVMTypeRef LLVMIntPtrTypeForASInContext(LLVMContextRef C, LLVMTargetDataRef TD,
unsigned AS);
/** Computes the size of a type in bits for a target.
See the method llvm::DataLayout::getTypeSizeInBits. */
unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty);
/** Computes the storage size of a type in bytes for a target.
See the method llvm::DataLayout::getTypeStoreSize. */
unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
/** Computes the ABI size of a type in bytes for a target.
See the method llvm::DataLayout::getTypeAllocSize. */
unsigned long long LLVMABISizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
/** Computes the ABI alignment of a type in bytes for a target.
See the method llvm::DataLayout::getABITypeAlignment. */
unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
/** Computes the call frame alignment of a type in bytes for a target.
See the method llvm::DataLayout::getCallFrameTypeAlignment. */
unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
/** Computes the preferred alignment of a type in bytes for a target.
See the method llvm::DataLayout::getPrefTypeAlignment. */
unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
/** Computes the preferred alignment of a global variable in bytes for a target.
See the method llvm::DataLayout::getPreferredAlignment. */
unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef TD,
LLVMValueRef GlobalVar);
/** Computes the structure element that contains the byte offset for a target.
See the method llvm::StructLayout::getElementContainingOffset. */
unsigned LLVMElementAtOffset(LLVMTargetDataRef TD, LLVMTypeRef StructTy,
unsigned long long Offset);
/** Computes the byte offset of the indexed struct element for a target.
See the method llvm::StructLayout::getElementOffset. */
unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef TD,
LLVMTypeRef StructTy, unsigned Element);
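
Editor's usage sketch (not part of the original header): querying layout facts from a data layout string. The layout string is an illustrative x86-64-style layout; the type constructors come from llvm-c/Core.h and use the global context.

#include <stdio.h>
#include "llvm-c/Core.h"
#include "llvm-c/Target.h"

void layout_demo(void) {
  LLVMTargetDataRef td =
      LLVMCreateTargetData("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
  LLVMTypeRef fields[] = { LLVMInt8Type(), LLVMInt32Type() };
  LLVMTypeRef st = LLVMStructType(fields, 2, /*Packed=*/0);

  printf("pointer size: %u bytes\n", LLVMPointerSize(td));
  printf("struct ABI size: %llu bytes\n", LLVMABISizeOfType(td, st));
  printf("offset of field 1: %llu\n", LLVMOffsetOfElement(td, st, 1));

  LLVMDisposeTargetData(td);
}
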
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif


@@ -1,147 +0,0 @@
/*===-- llvm-c/TargetMachine.h - Target Machine Library C Interface - C++ -*-=*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to the Target and TargetMachine *|
|* classes, which can be used to generate assembly or object files. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TARGETMACHINE_H
#define LLVM_C_TARGETMACHINE_H
#include "llvm-c/Types.h"
#include "llvm-c/Target.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
typedef struct LLVMTarget *LLVMTargetRef;
typedef enum {
LLVMCodeGenLevelNone,
LLVMCodeGenLevelLess,
LLVMCodeGenLevelDefault,
LLVMCodeGenLevelAggressive
} LLVMCodeGenOptLevel;
typedef enum {
LLVMRelocDefault,
LLVMRelocStatic,
LLVMRelocPIC,
LLVMRelocDynamicNoPic
} LLVMRelocMode;
typedef enum {
LLVMCodeModelDefault,
LLVMCodeModelJITDefault,
LLVMCodeModelSmall,
LLVMCodeModelKernel,
LLVMCodeModelMedium,
LLVMCodeModelLarge
} LLVMCodeModel;
typedef enum {
LLVMAssemblyFile,
LLVMObjectFile
} LLVMCodeGenFileType;
/** Returns the first llvm::Target in the registered targets list. */
LLVMTargetRef LLVMGetFirstTarget(void);
/** Returns the next llvm::Target given a previous one (or null if there's none) */
LLVMTargetRef LLVMGetNextTarget(LLVMTargetRef T);
/*===-- Target ------------------------------------------------------------===*/
/** Finds the target corresponding to the given name. Returns NULL if no such
target is registered. */
LLVMTargetRef LLVMGetTargetFromName(const char *Name);
/** Finds the target corresponding to the given triple and stores it in \p T.
Returns 0 on success. Optionally returns any error in ErrorMessage.
Use LLVMDisposeMessage to dispose the message. */
LLVMBool LLVMGetTargetFromTriple(const char* Triple, LLVMTargetRef *T,
char **ErrorMessage);
/** Returns the name of a target. See llvm::Target::getName */
const char *LLVMGetTargetName(LLVMTargetRef T);
/** Returns the description of a target. See llvm::Target::getDescription */
const char *LLVMGetTargetDescription(LLVMTargetRef T);
/** Returns whether the target has a JIT. */
LLVMBool LLVMTargetHasJIT(LLVMTargetRef T);
/** Returns whether the target has a TargetMachine associated with it. */
LLVMBool LLVMTargetHasTargetMachine(LLVMTargetRef T);
/** Returns whether the target has an ASM backend (required for emitting output). */
LLVMBool LLVMTargetHasAsmBackend(LLVMTargetRef T);
/*===-- Target Machine ----------------------------------------------------===*/
/** Creates a new llvm::TargetMachine. See llvm::Target::createTargetMachine */
LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T,
const char *Triple, const char *CPU, const char *Features,
LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc, LLVMCodeModel CodeModel);
/** Dispose the LLVMTargetMachineRef instance generated by
LLVMCreateTargetMachine. */
void LLVMDisposeTargetMachine(LLVMTargetMachineRef T);
/** Returns the Target used in a TargetMachine */
LLVMTargetRef LLVMGetTargetMachineTarget(LLVMTargetMachineRef T);
/** Returns the triple used when creating this target machine. See
llvm::TargetMachine::getTriple. The result needs to be disposed with
LLVMDisposeMessage. */
char *LLVMGetTargetMachineTriple(LLVMTargetMachineRef T);
/** Returns the CPU used when creating this target machine. See
llvm::TargetMachine::getCPU. The result needs to be disposed with
LLVMDisposeMessage. */
char *LLVMGetTargetMachineCPU(LLVMTargetMachineRef T);
/** Returns the feature string used when creating this target machine. See
llvm::TargetMachine::getFeatureString. The result needs to be disposed with
LLVMDisposeMessage. */
char *LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T);
/** Create a DataLayout based on the TargetMachine. */
LLVMTargetDataRef LLVMCreateTargetDataLayout(LLVMTargetMachineRef T);
/** Set the target machine's ASM verbosity. */
void LLVMSetTargetMachineAsmVerbosity(LLVMTargetMachineRef T,
LLVMBool VerboseAsm);
/** Emits an asm or object file for the given module to the filename. This
wraps several C++-only classes (among them a file stream). Returns any
error in ErrorMessage. Use LLVMDisposeMessage to dispose the message. */
LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
char *Filename, LLVMCodeGenFileType codegen, char **ErrorMessage);
/** Compile the LLVM IR stored in \p M and store the result in \p OutMemBuf. */
LLVMBool LLVMTargetMachineEmitToMemoryBuffer(LLVMTargetMachineRef T, LLVMModuleRef M,
LLVMCodeGenFileType codegen, char** ErrorMessage, LLVMMemoryBufferRef *OutMemBuf);
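/* A minimal client-side sketch (not part of the original header): create a
   target machine for the host triple and emit an object file for an existing
   module. It assumes the initialization routines from llvm-c/Target.h and a
   module obtained via llvm-c/Core.h; "generic", the empty feature string and
   "out.o" are placeholder values. */
#include <stdio.h>
#include "llvm-c/Core.h"
#include "llvm-c/Target.h"
#include "llvm-c/TargetMachine.h"

static int emit_object_file(LLVMModuleRef M) {
  char *error = NULL;
  char *triple = LLVMGetDefaultTargetTriple();
  LLVMTargetRef target;
  LLVMTargetMachineRef tm;
  char filename[] = "out.o"; /* EmitToFile takes a mutable char *. */
  int rc = 0;

  /* Register every compiled-in target so the lookup below can succeed. */
  LLVMInitializeAllTargetInfos();
  LLVMInitializeAllTargets();
  LLVMInitializeAllTargetMCs();
  LLVMInitializeAllAsmPrinters();

  if (LLVMGetTargetFromTriple(triple, &target, &error)) {
    fprintf(stderr, "%s\n", error);
    LLVMDisposeMessage(error);
    LLVMDisposeMessage(triple);
    return 1;
  }
  tm = LLVMCreateTargetMachine(target, triple, "generic", "",
                               LLVMCodeGenLevelDefault, LLVMRelocDefault,
                               LLVMCodeModelDefault);
  if (LLVMTargetMachineEmitToFile(tm, M, filename, LLVMObjectFile, &error)) {
    fprintf(stderr, "%s\n", error);
    LLVMDisposeMessage(error);
    rc = 1;
  }
  LLVMDisposeTargetMachine(tm);
  LLVMDisposeMessage(triple);
  return rc;
}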
/*===-- Triple ------------------------------------------------------------===*/
/** Get a triple for the host machine as a string. The result needs to be
disposed with LLVMDisposeMessage. */
char* LLVMGetDefaultTargetTriple(void);
/** Adds the target-specific analysis passes to the pass manager. */
void LLVMAddAnalysisPasses(LLVMTargetMachineRef T, LLVMPassManagerRef PM);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,81 +0,0 @@
/*===-- IPO.h - Interprocedural Transformations C Interface -----*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMIPO.a, which implements *|
|* various interprocedural transformations of the LLVM IR. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TRANSFORMS_IPO_H
#define LLVM_C_TRANSFORMS_IPO_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCTransformsIPO Interprocedural transformations
* @ingroup LLVMCTransforms
*
* @{
*/
/** See llvm::createArgumentPromotionPass function. */
void LLVMAddArgumentPromotionPass(LLVMPassManagerRef PM);
/** See llvm::createConstantMergePass function. */
void LLVMAddConstantMergePass(LLVMPassManagerRef PM);
/** See llvm::createDeadArgEliminationPass function. */
void LLVMAddDeadArgEliminationPass(LLVMPassManagerRef PM);
/** See llvm::createFunctionAttrsPass function. */
void LLVMAddFunctionAttrsPass(LLVMPassManagerRef PM);
/** See llvm::createFunctionInliningPass function. */
void LLVMAddFunctionInliningPass(LLVMPassManagerRef PM);
/** See llvm::createAlwaysInlinerPass function. */
void LLVMAddAlwaysInlinerPass(LLVMPassManagerRef PM);
/** See llvm::createGlobalDCEPass function. */
void LLVMAddGlobalDCEPass(LLVMPassManagerRef PM);
/** See llvm::createGlobalOptimizerPass function. */
void LLVMAddGlobalOptimizerPass(LLVMPassManagerRef PM);
/** See llvm::createIPConstantPropagationPass function. */
void LLVMAddIPConstantPropagationPass(LLVMPassManagerRef PM);
/** See llvm::createPruneEHPass function. */
void LLVMAddPruneEHPass(LLVMPassManagerRef PM);
/** See llvm::createIPSCCPPass function. */
void LLVMAddIPSCCPPass(LLVMPassManagerRef PM);
/** See llvm::createInternalizePass function. */
void LLVMAddInternalizePass(LLVMPassManagerRef, unsigned AllButMain);
/** See llvm::createStripDeadPrototypesPass function. */
void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM);
/** See llvm::createStripSymbolsPass function. */
void LLVMAddStripSymbolsPass(LLVMPassManagerRef PM);
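/* A minimal client-side sketch (not part of the original header), assuming the
   pass manager entry points from llvm-c/Core.h: run a small interprocedural
   pipeline over a module. The pass selection is illustrative, not a
   recommended -O level. */
#include "llvm-c/Core.h"
#include "llvm-c/Transforms/IPO.h"

static void run_ipo_pipeline(LLVMModuleRef M) {
  LLVMPassManagerRef PM = LLVMCreatePassManager();
  LLVMAddFunctionInliningPass(PM);
  LLVMAddGlobalDCEPass(PM);          /* Drop functions made dead by inlining. */
  LLVMAddDeadArgEliminationPass(PM);
  LLVMAddConstantMergePass(PM);
  LLVMRunPassManager(PM, M);         /* Returns true if the module was modified. */
  LLVMDisposePassManager(PM);
}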
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif

View File

@ -1,90 +0,0 @@
/*===-- llvm-c/Transform/PassManagerBuilder.h - PMB C Interface ---*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to the PassManagerBuilder class. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TRANSFORMS_PASSMANAGERBUILDER_H
#define LLVM_C_TRANSFORMS_PASSMANAGERBUILDER_H
#include "llvm-c/Types.h"
typedef struct LLVMOpaquePassManagerBuilder *LLVMPassManagerBuilderRef;
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCTransformsPassManagerBuilder Pass manager builder
* @ingroup LLVMCTransforms
*
* @{
*/
/** See llvm::PassManagerBuilder. */
LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate(void);
void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB);
/** See llvm::PassManagerBuilder::OptLevel. */
void
LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB,
unsigned OptLevel);
/** See llvm::PassManagerBuilder::SizeLevel. */
void
LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB,
unsigned SizeLevel);
/** See llvm::PassManagerBuilder::DisableUnitAtATime. */
void
LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB,
LLVMBool Value);
/** See llvm::PassManagerBuilder::DisableUnrollLoops. */
void
LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB,
LLVMBool Value);
/** See llvm::PassManagerBuilder::DisableSimplifyLibCalls */
void
LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB,
LLVMBool Value);
/** See llvm::PassManagerBuilder::Inliner. */
void
LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB,
unsigned Threshold);
/** See llvm::PassManagerBuilder::populateFunctionPassManager. */
void
LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB,
LLVMPassManagerRef PM);
/** See llvm::PassManagerBuilder::populateModulePassManager. */
void
LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB,
LLVMPassManagerRef PM);
/** See llvm::PassManagerBuilder::populateLTOPassManager. */
void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
LLVMPassManagerRef PM,
LLVMBool Internalize,
LLVMBool RunInliner);
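/* A minimal sketch (not part of the original header): configure the builder
   roughly like -O2 and let it populate both pass managers, which are assumed
   to have been created via llvm-c/Core.h. The inliner threshold of 225 is
   LLVM's historical -O2 default; treat the exact numbers as illustrative. */
static void populate_like_O2(LLVMPassManagerRef FPM, LLVMPassManagerRef MPM) {
  LLVMPassManagerBuilderRef PMB = LLVMPassManagerBuilderCreate();
  LLVMPassManagerBuilderSetOptLevel(PMB, 2);  /* -O2 */
  LLVMPassManagerBuilderSetSizeLevel(PMB, 0); /* Not optimizing for size. */
  LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, 225);
  LLVMPassManagerBuilderPopulateFunctionPassManager(PMB, FPM);
  LLVMPassManagerBuilderPopulateModulePassManager(PMB, MPM);
  LLVMPassManagerBuilderDispose(PMB);
}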
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,158 +0,0 @@
/*===-- Scalar.h - Scalar Transformation Library C Interface ----*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMScalarOpts.a, which *|
|* implements various scalar transformations of the LLVM IR. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TRANSFORMS_SCALAR_H
#define LLVM_C_TRANSFORMS_SCALAR_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCTransformsScalar Scalar transformations
* @ingroup LLVMCTransforms
*
* @{
*/
/** See llvm::createAggressiveDCEPass function. */
void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM);
/** See llvm::createBitTrackingDCEPass function. */
void LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM);
/** See llvm::createAlignmentFromAssumptionsPass function. */
void LLVMAddAlignmentFromAssumptionsPass(LLVMPassManagerRef PM);
/** See llvm::createCFGSimplificationPass function. */
void LLVMAddCFGSimplificationPass(LLVMPassManagerRef PM);
/** See llvm::createDeadStoreEliminationPass function. */
void LLVMAddDeadStoreEliminationPass(LLVMPassManagerRef PM);
/** See llvm::createScalarizerPass function. */
void LLVMAddScalarizerPass(LLVMPassManagerRef PM);
/** See llvm::createMergedLoadStoreMotionPass function. */
void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM);
/** See llvm::createGVNPass function. */
void LLVMAddGVNPass(LLVMPassManagerRef PM);
/** See llvm::createIndVarSimplifyPass function. */
void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM);
/** See llvm::createInstructionCombiningPass function. */
void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM);
/** See llvm::createJumpThreadingPass function. */
void LLVMAddJumpThreadingPass(LLVMPassManagerRef PM);
/** See llvm::createLICMPass function. */
void LLVMAddLICMPass(LLVMPassManagerRef PM);
/** See llvm::createLoopDeletionPass function. */
void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM);
/** See llvm::createLoopIdiomPass function */
void LLVMAddLoopIdiomPass(LLVMPassManagerRef PM);
/** See llvm::createLoopRotatePass function. */
void LLVMAddLoopRotatePass(LLVMPassManagerRef PM);
/** See llvm::createLoopRerollPass function. */
void LLVMAddLoopRerollPass(LLVMPassManagerRef PM);
/** See llvm::createLoopUnrollPass function. */
void LLVMAddLoopUnrollPass(LLVMPassManagerRef PM);
/** See llvm::createLoopUnswitchPass function. */
void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM);
/** See llvm::createMemCpyOptPass function. */
void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM);
/** See llvm::createPartiallyInlineLibCallsPass function. */
void LLVMAddPartiallyInlineLibCallsPass(LLVMPassManagerRef PM);
/** See llvm::createLowerSwitchPass function. */
void LLVMAddLowerSwitchPass(LLVMPassManagerRef PM);
/** See llvm::createPromoteMemoryToRegisterPass function. */
void LLVMAddPromoteMemoryToRegisterPass(LLVMPassManagerRef PM);
/** See llvm::createReassociatePass function. */
void LLVMAddReassociatePass(LLVMPassManagerRef PM);
/** See llvm::createSCCPPass function. */
void LLVMAddSCCPPass(LLVMPassManagerRef PM);
/** See llvm::createSROAPass function. */
void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM);
/** See llvm::createSROAPass function. */
void LLVMAddScalarReplAggregatesPassSSA(LLVMPassManagerRef PM);
/** See llvm::createSROAPass function. */
void LLVMAddScalarReplAggregatesPassWithThreshold(LLVMPassManagerRef PM,
int Threshold);
/** See llvm::createSimplifyLibCallsPass function. */
void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM);
/** See llvm::createTailCallEliminationPass function. */
void LLVMAddTailCallEliminationPass(LLVMPassManagerRef PM);
/** See llvm::createConstantPropagationPass function. */
void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM);
/** See llvm::createDemoteRegisterToMemoryPass function. */
void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM);
/** See llvm::createVerifierPass function. */
void LLVMAddVerifierPass(LLVMPassManagerRef PM);
/** See llvm::createCorrelatedValuePropagationPass function */
void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM);
/** See llvm::createEarlyCSEPass function */
void LLVMAddEarlyCSEPass(LLVMPassManagerRef PM);
/** See llvm::createLowerExpectIntrinsicPass function */
void LLVMAddLowerExpectIntrinsicPass(LLVMPassManagerRef PM);
/** See llvm::createTypeBasedAliasAnalysisPass function */
void LLVMAddTypeBasedAliasAnalysisPass(LLVMPassManagerRef PM);
/** See llvm::createScopedNoAliasAAPass function */
void LLVMAddScopedNoAliasAAPass(LLVMPassManagerRef PM);
/** See llvm::createBasicAliasAnalysisPass function */
void LLVMAddBasicAliasAnalysisPass(LLVMPassManagerRef PM);
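/* A minimal client-side sketch (not part of the original header), assuming the
   function pass manager entry points from llvm-c/Core.h: the classic mem2reg,
   instcombine, reassociate, GVN, simplifycfg pipeline run over every function
   in a module. */
#include "llvm-c/Core.h"
#include "llvm-c/Transforms/Scalar.h"

static void optimize_functions(LLVMModuleRef M) {
  LLVMValueRef F;
  LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
  LLVMAddPromoteMemoryToRegisterPass(FPM); /* mem2reg first: enables the rest. */
  LLVMAddInstructionCombiningPass(FPM);
  LLVMAddReassociatePass(FPM);
  LLVMAddGVNPass(FPM);
  LLVMAddCFGSimplificationPass(FPM);
  LLVMInitializeFunctionPassManager(FPM);
  for (F = LLVMGetFirstFunction(M); F; F = LLVMGetNextFunction(F))
    LLVMRunFunctionPassManager(FPM, F);
  LLVMFinalizeFunctionPassManager(FPM);
  LLVMDisposePassManager(FPM);
}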
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif

View File

@ -1,53 +0,0 @@
/*===---------------------------Vectorize.h --------------------- -*- C -*-===*\
|*===----------- Vectorization Transformation Library C Interface ---------===*|
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMVectorize.a, which *|
|* implements various vectorization transformations of the LLVM IR. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TRANSFORMS_VECTORIZE_H
#define LLVM_C_TRANSFORMS_VECTORIZE_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCTransformsVectorize Vectorization transformations
* @ingroup LLVMCTransforms
*
* @{
*/
/** See llvm::createBBVectorizePass function. */
void LLVMAddBBVectorizePass(LLVMPassManagerRef PM);
/** See llvm::createLoopVectorizePass function. */
void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM);
/** See llvm::createSLPVectorizerPass function. */
void LLVMAddSLPVectorizePass(LLVMPassManagerRef PM);
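/* A minimal sketch (not part of the original header): the loop and SLP
   vectorizers are typically appended to a module pass manager after the
   scalar cleanup passes have run. */
static void add_vectorizers(LLVMPassManagerRef PM) {
  LLVMAddLoopVectorizePass(PM); /* Vectorize counted loops. */
  LLVMAddSLPVectorizePass(PM);  /* Vectorize straight-line code. */
}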
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif

View File

@ -1,131 +0,0 @@
/*===-- llvm-c/Types.h - C Interface Types declarations -----------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines types used by the C interface to LLVM. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TYPES_H
#define LLVM_C_TYPES_H
#include "llvm/Support/DataTypes.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCSupportTypes Types and Enumerations
*
* @{
*/
typedef int LLVMBool;
/* Opaque types. */
/**
* LLVM uses a polymorphic type hierarchy which C cannot represent, therefore
* parameters must be passed as base types. Despite the declared types, most
* of the functions provided operate only on branches of the type hierarchy.
* The declared parameter names are descriptive and specify which type is
* required. Additionally, each type hierarchy is documented along with the
* functions that operate upon it. For more detail, refer to LLVM's C++ code.
* If in doubt, refer to Core.cpp, which performs parameter downcasts in the
* form unwrap<RequiredType>(Param).
*/
/**
* Used to pass regions of memory through LLVM interfaces.
*
* @see llvm::MemoryBuffer
*/
typedef struct LLVMOpaqueMemoryBuffer *LLVMMemoryBufferRef;
/**
* The top-level container for all LLVM global data. See the LLVMContext class.
*/
typedef struct LLVMOpaqueContext *LLVMContextRef;
/**
* The top-level container for all other LLVM Intermediate Representation (IR)
* objects.
*
* @see llvm::Module
*/
typedef struct LLVMOpaqueModule *LLVMModuleRef;
/**
* Each value in the LLVM IR has a type, an LLVMTypeRef.
*
* @see llvm::Type
*/
typedef struct LLVMOpaqueType *LLVMTypeRef;
/**
* Represents an individual value in LLVM IR.
*
* This models llvm::Value.
*/
typedef struct LLVMOpaqueValue *LLVMValueRef;
/**
* Represents a basic block of instructions in LLVM IR.
*
* This models llvm::BasicBlock.
*/
typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef;
/**
* Represents an LLVM basic block builder.
*
* This models llvm::IRBuilder.
*/
typedef struct LLVMOpaqueBuilder *LLVMBuilderRef;
/**
* Interface used to provide a module to JIT or interpreter.
* This is now just a synonym for llvm::Module, but we have to keep using the
* different type to keep binary compatibility.
*/
typedef struct LLVMOpaqueModuleProvider *LLVMModuleProviderRef;
/** @see llvm::PassManagerBase */
typedef struct LLVMOpaquePassManager *LLVMPassManagerRef;
/** @see llvm::PassRegistry */
typedef struct LLVMOpaquePassRegistry *LLVMPassRegistryRef;
/**
* Used to get the users and usees of a Value.
*
* @see llvm::Use */
typedef struct LLVMOpaqueUse *LLVMUseRef;
/**
 * Used to represent an attribute.
*
* @see llvm::Attribute
*/
typedef struct LLVMOpaqueAttributeRef *LLVMAttributeRef;
/**
* @see llvm::DiagnosticInfo
*/
typedef struct LLVMOpaqueDiagnosticInfo *LLVMDiagnosticInfoRef;
/**
* @}
*/
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,797 +0,0 @@
/*===-- llvm-c/lto.h - LTO Public C Interface ---------------------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header provides public interface to an abstract link time optimization*|
|* library. LLVM provides an implementation of this interface for use with *|
|* llvm bitcode files. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_LTO_H
#define LLVM_C_LTO_H
#ifdef __cplusplus
#include <cstddef>
#else
#include <stddef.h>
#endif
#include <sys/types.h>
#ifndef __cplusplus
#if !defined(_MSC_VER)
#include <stdbool.h>
typedef bool lto_bool_t;
#else
/* MSVC in particular does not have anything like _Bool or bool in C, but we can
at least make sure the type is the same size. The implementation side will
use C++ bool. */
typedef unsigned char lto_bool_t;
#endif
#else
typedef bool lto_bool_t;
#endif
/**
* @defgroup LLVMCLTO LTO
* @ingroup LLVMC
*
* @{
*/
#define LTO_API_VERSION 20
/**
* \since prior to LTO_API_VERSION=3
*/
typedef enum {
LTO_SYMBOL_ALIGNMENT_MASK = 0x0000001F, /* log2 of alignment */
LTO_SYMBOL_PERMISSIONS_MASK = 0x000000E0,
LTO_SYMBOL_PERMISSIONS_CODE = 0x000000A0,
LTO_SYMBOL_PERMISSIONS_DATA = 0x000000C0,
LTO_SYMBOL_PERMISSIONS_RODATA = 0x00000080,
LTO_SYMBOL_DEFINITION_MASK = 0x00000700,
LTO_SYMBOL_DEFINITION_REGULAR = 0x00000100,
LTO_SYMBOL_DEFINITION_TENTATIVE = 0x00000200,
LTO_SYMBOL_DEFINITION_WEAK = 0x00000300,
LTO_SYMBOL_DEFINITION_UNDEFINED = 0x00000400,
LTO_SYMBOL_DEFINITION_WEAKUNDEF = 0x00000500,
LTO_SYMBOL_SCOPE_MASK = 0x00003800,
LTO_SYMBOL_SCOPE_INTERNAL = 0x00000800,
LTO_SYMBOL_SCOPE_HIDDEN = 0x00001000,
LTO_SYMBOL_SCOPE_PROTECTED = 0x00002000,
LTO_SYMBOL_SCOPE_DEFAULT = 0x00001800,
LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN = 0x00002800,
LTO_SYMBOL_COMDAT = 0x00004000,
LTO_SYMBOL_ALIAS = 0x00008000
} lto_symbol_attributes;
/**
* \since prior to LTO_API_VERSION=3
*/
typedef enum {
LTO_DEBUG_MODEL_NONE = 0,
LTO_DEBUG_MODEL_DWARF = 1
} lto_debug_model;
/**
* \since prior to LTO_API_VERSION=3
*/
typedef enum {
LTO_CODEGEN_PIC_MODEL_STATIC = 0,
LTO_CODEGEN_PIC_MODEL_DYNAMIC = 1,
LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC = 2,
LTO_CODEGEN_PIC_MODEL_DEFAULT = 3
} lto_codegen_model;
/** opaque reference to a loaded object module */
typedef struct LLVMOpaqueLTOModule *lto_module_t;
/** opaque reference to a code generator */
typedef struct LLVMOpaqueLTOCodeGenerator *lto_code_gen_t;
/** opaque reference to a thin code generator */
typedef struct LLVMOpaqueThinLTOCodeGenerator *thinlto_code_gen_t;
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Returns a printable string describing the LLVM version.
*
* \since prior to LTO_API_VERSION=3
*/
extern const char*
lto_get_version(void);
/**
 * Returns the last error string, or NULL if the last operation was successful.
*
* \since prior to LTO_API_VERSION=3
*/
extern const char*
lto_get_error_message(void);
/**
* Checks if a file is a loadable object file.
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_bool_t
lto_module_is_object_file(const char* path);
/**
* Checks if a file is a loadable object compiled for requested target.
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_bool_t
lto_module_is_object_file_for_target(const char* path,
const char* target_triple_prefix);
/**
 * Return true if \p mem contains a bitcode file with ObjC code (category
 * or class) in it.
*
* \since LTO_API_VERSION=20
*/
extern lto_bool_t
lto_module_has_objc_category(const void *mem, size_t length);
/**
* Checks if a buffer is a loadable object file.
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_bool_t lto_module_is_object_file_in_memory(const void *mem,
size_t length);
/**
* Checks if a buffer is a loadable object compiled for requested target.
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_bool_t
lto_module_is_object_file_in_memory_for_target(const void* mem, size_t length,
const char* target_triple_prefix);
/**
* Loads an object file from disk.
* Returns NULL on error (check lto_get_error_message() for details).
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_module_t
lto_module_create(const char* path);
/**
* Loads an object file from memory.
* Returns NULL on error (check lto_get_error_message() for details).
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_module_t
lto_module_create_from_memory(const void* mem, size_t length);
/**
* Loads an object file from memory with an extra path argument.
* Returns NULL on error (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=9
*/
extern lto_module_t
lto_module_create_from_memory_with_path(const void* mem, size_t length,
const char *path);
/**
* \brief Loads an object file in its own context.
*
* Loads an object file in its own LLVMContext. This function call is
* thread-safe. However, modules created this way should not be merged into an
* lto_code_gen_t using \a lto_codegen_add_module().
*
* Returns NULL on error (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=11
*/
extern lto_module_t
lto_module_create_in_local_context(const void *mem, size_t length,
const char *path);
/**
* \brief Loads an object file in the codegen context.
*
* Loads an object file into the same context as \c cg. The module is safe to
* add using \a lto_codegen_add_module().
*
* Returns NULL on error (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=11
*/
extern lto_module_t
lto_module_create_in_codegen_context(const void *mem, size_t length,
const char *path, lto_code_gen_t cg);
/**
* Loads an object file from disk. The seek point of fd is not preserved.
* Returns NULL on error (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=5
*/
extern lto_module_t
lto_module_create_from_fd(int fd, const char *path, size_t file_size);
/**
* Loads an object file from disk. The seek point of fd is not preserved.
* Returns NULL on error (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=5
*/
extern lto_module_t
lto_module_create_from_fd_at_offset(int fd, const char *path, size_t file_size,
size_t map_size, off_t offset);
/**
* Frees all memory internally allocated by the module.
* Upon return the lto_module_t is no longer valid.
*
* \since prior to LTO_API_VERSION=3
*/
extern void
lto_module_dispose(lto_module_t mod);
/**
 * Returns the triple string under which the object module was compiled.
*
* \since prior to LTO_API_VERSION=3
*/
extern const char*
lto_module_get_target_triple(lto_module_t mod);
/**
 * Sets the triple string with which the object will be codegened.
*
* \since LTO_API_VERSION=4
*/
extern void
lto_module_set_target_triple(lto_module_t mod, const char *triple);
/**
* Returns the number of symbols in the object module.
*
* \since prior to LTO_API_VERSION=3
*/
extern unsigned int
lto_module_get_num_symbols(lto_module_t mod);
/**
* Returns the name of the ith symbol in the object module.
*
* \since prior to LTO_API_VERSION=3
*/
extern const char*
lto_module_get_symbol_name(lto_module_t mod, unsigned int index);
/**
* Returns the attributes of the ith symbol in the object module.
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_symbol_attributes
lto_module_get_symbol_attribute(lto_module_t mod, unsigned int index);
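/* A minimal client-side sketch (not part of the original header): load a
   bitcode file and print each symbol with its raw attribute mask; the path
   argument is supplied by the caller. */
#include <stdio.h>
#include "llvm-c/lto.h"

static void list_symbols(const char *path) {
  unsigned i, n;
  lto_module_t mod = lto_module_create(path);
  if (!mod) {
    fprintf(stderr, "%s\n", lto_get_error_message());
    return;
  }
  n = lto_module_get_num_symbols(mod);
  for (i = 0; i < n; ++i)
    printf("%-40s 0x%08x\n", lto_module_get_symbol_name(mod, i),
           (unsigned)lto_module_get_symbol_attribute(mod, i));
  lto_module_dispose(mod);
}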
/**
* Returns the module's linker options.
*
* The linker options may consist of multiple flags. It is the linker's
* responsibility to split the flags using a platform-specific mechanism.
*
* \since LTO_API_VERSION=16
*/
extern const char*
lto_module_get_linkeropts(lto_module_t mod);
/**
* Diagnostic severity.
*
* \since LTO_API_VERSION=7
*/
typedef enum {
LTO_DS_ERROR = 0,
LTO_DS_WARNING = 1,
LTO_DS_REMARK = 3, // Added in LTO_API_VERSION=10.
LTO_DS_NOTE = 2
} lto_codegen_diagnostic_severity_t;
/**
* Diagnostic handler type.
* \p severity defines the severity.
* \p diag is the actual diagnostic.
 * The diagnostic is not prefixed by any severity keyword, e.g., 'error: '.
* \p ctxt is used to pass the context set with the diagnostic handler.
*
* \since LTO_API_VERSION=7
*/
typedef void (*lto_diagnostic_handler_t)(
lto_codegen_diagnostic_severity_t severity, const char *diag, void *ctxt);
/**
* Set a diagnostic handler and the related context (void *).
 * This is more general than lto_get_error_message, as the diagnostic handler
 * can be called at any time within libLTO.
*
* \since LTO_API_VERSION=7
*/
extern void lto_codegen_set_diagnostic_handler(lto_code_gen_t,
lto_diagnostic_handler_t,
void *);
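/* A minimal client-side sketch (not part of the original header): a handler
   that adds the severity prefix itself, since the library does not, followed
   by its registration. */
#include <stdio.h>

static void print_lto_diagnostic(lto_codegen_diagnostic_severity_t severity,
                                 const char *diag, void *ctxt) {
  const char *prefix =
      severity == LTO_DS_ERROR   ? "error"
    : severity == LTO_DS_WARNING ? "warning"
    : severity == LTO_DS_REMARK  ? "remark"
                                 : "note";
  (void)ctxt; /* No context registered below. */
  fprintf(stderr, "%s: %s\n", prefix, diag);
}
/* Registration: lto_codegen_set_diagnostic_handler(cg, print_lto_diagnostic, NULL); */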
/**
* Instantiates a code generator.
* Returns NULL on error (check lto_get_error_message() for details).
*
* All modules added using \a lto_codegen_add_module() must have been created
* in the same context as the codegen.
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_code_gen_t
lto_codegen_create(void);
/**
* \brief Instantiate a code generator in its own context.
*
* Instantiates a code generator in its own context. Modules added via \a
* lto_codegen_add_module() must have all been created in the same context,
* using \a lto_module_create_in_codegen_context().
*
* \since LTO_API_VERSION=11
*/
extern lto_code_gen_t
lto_codegen_create_in_local_context(void);
/**
 * Frees the code generator and all memory it internally allocated.
* Upon return the lto_code_gen_t is no longer valid.
*
* \since prior to LTO_API_VERSION=3
*/
extern void
lto_codegen_dispose(lto_code_gen_t);
/**
* Add an object module to the set of modules for which code will be generated.
* Returns true on error (check lto_get_error_message() for details).
*
* \c cg and \c mod must both be in the same context. See \a
* lto_codegen_create_in_local_context() and \a
* lto_module_create_in_codegen_context().
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_bool_t
lto_codegen_add_module(lto_code_gen_t cg, lto_module_t mod);
/**
* Sets the object module for code generation. This will transfer the ownership
* of the module to the code generator.
*
* \c cg and \c mod must both be in the same context.
*
* \since LTO_API_VERSION=13
*/
extern void
lto_codegen_set_module(lto_code_gen_t cg, lto_module_t mod);
/**
 * Sets whether debug info should be generated.
* Returns true on error (check lto_get_error_message() for details).
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_bool_t
lto_codegen_set_debug_model(lto_code_gen_t cg, lto_debug_model);
/**
 * Sets which PIC code model to generate.
* Returns true on error (check lto_get_error_message() for details).
*
* \since prior to LTO_API_VERSION=3
*/
extern lto_bool_t
lto_codegen_set_pic_model(lto_code_gen_t cg, lto_codegen_model);
/**
* Sets the cpu to generate code for.
*
* \since LTO_API_VERSION=4
*/
extern void
lto_codegen_set_cpu(lto_code_gen_t cg, const char *cpu);
/**
* Sets the location of the assembler tool to run. If not set, libLTO
* will use gcc to invoke the assembler.
*
* \since LTO_API_VERSION=3
*/
extern void
lto_codegen_set_assembler_path(lto_code_gen_t cg, const char* path);
/**
* Sets extra arguments that libLTO should pass to the assembler.
*
* \since LTO_API_VERSION=4
*/
extern void
lto_codegen_set_assembler_args(lto_code_gen_t cg, const char **args,
int nargs);
/**
 * Adds a symbol to the list of global symbols that must exist in the final
 * generated code. If a function is not listed there, it might be inlined into
 * every call site and optimized away.
*
* \since prior to LTO_API_VERSION=3
*/
extern void
lto_codegen_add_must_preserve_symbol(lto_code_gen_t cg, const char* symbol);
/**
* Writes a new object file at the specified path that contains the
* merged contents of all modules added so far.
* Returns true on error (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=5
*/
extern lto_bool_t
lto_codegen_write_merged_modules(lto_code_gen_t cg, const char* path);
/**
* Generates code for all added modules into one native object file.
* This calls lto_codegen_optimize then lto_codegen_compile_optimized.
*
* On success returns a pointer to a generated mach-o/ELF buffer and
* length set to the buffer size. The buffer is owned by the
* lto_code_gen_t and will be freed when lto_codegen_dispose()
* is called, or lto_codegen_compile() is called again.
* On failure, returns NULL (check lto_get_error_message() for details).
*
* \since prior to LTO_API_VERSION=3
*/
extern const void*
lto_codegen_compile(lto_code_gen_t cg, size_t* length);
/**
* Generates code for all added modules into one native object file.
* This calls lto_codegen_optimize then lto_codegen_compile_optimized (instead
* of returning a generated mach-o/ELF buffer, it writes to a file).
*
* The name of the file is written to name. Returns true on error.
*
* \since LTO_API_VERSION=5
*/
extern lto_bool_t
lto_codegen_compile_to_file(lto_code_gen_t cg, const char** name);
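/* A minimal client-side sketch (not part of the original header): merge two
   bitcode files and write one native object file. The paths come from the
   caller; the preserved symbol name is a placeholder (on Mach-O the entry
   point would be "_main"), and module cleanup is elided for brevity. */
#include <stdio.h>
#include "llvm-c/lto.h"

static int lto_compile_two(const char *path1, const char *path2) {
  const char *outname = NULL;
  lto_code_gen_t cg = lto_codegen_create();
  lto_module_t m1 = lto_module_create(path1);
  lto_module_t m2 = lto_module_create(path2);
  if (!cg || !m1 || !m2 ||
      lto_codegen_add_module(cg, m1) || lto_codegen_add_module(cg, m2)) {
    fprintf(stderr, "%s\n", lto_get_error_message());
    return 1;
  }
  /* Keep the entry point from being internalized and optimized away. */
  lto_codegen_add_must_preserve_symbol(cg, "main");
  if (lto_codegen_compile_to_file(cg, &outname)) {
    fprintf(stderr, "%s\n", lto_get_error_message());
    return 1;
  }
  printf("wrote %s\n", outname);
  lto_codegen_dispose(cg);
  return 0;
}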
/**
* Runs optimization for the merged module. Returns true on error.
*
* \since LTO_API_VERSION=12
*/
extern lto_bool_t
lto_codegen_optimize(lto_code_gen_t cg);
/**
* Generates code for the optimized merged module into one native object file.
* It will not run any IR optimizations on the merged module.
*
* On success returns a pointer to a generated mach-o/ELF buffer and length set
* to the buffer size. The buffer is owned by the lto_code_gen_t and will be
* freed when lto_codegen_dispose() is called, or
* lto_codegen_compile_optimized() is called again. On failure, returns NULL
* (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=12
*/
extern const void*
lto_codegen_compile_optimized(lto_code_gen_t cg, size_t* length);
/**
* Returns the runtime API version.
*
* \since LTO_API_VERSION=12
*/
extern unsigned int
lto_api_version(void);
/**
* Sets options to help debug codegen bugs.
*
* \since prior to LTO_API_VERSION=3
*/
extern void
lto_codegen_debug_options(lto_code_gen_t cg, const char *);
/**
* Initializes LLVM disassemblers.
* FIXME: This doesn't really belong here.
*
* \since LTO_API_VERSION=5
*/
extern void
lto_initialize_disassembler(void);
/**
 * Sets whether to run the internalize pass during optimization and code
 * generation.
*
* \since LTO_API_VERSION=14
*/
extern void
lto_codegen_set_should_internalize(lto_code_gen_t cg,
lto_bool_t ShouldInternalize);
/**
* \brief Set whether to embed uselists in bitcode.
*
* Sets whether \a lto_codegen_write_merged_modules() should embed uselists in
* output bitcode. This should be turned on for all -save-temps output.
*
* \since LTO_API_VERSION=15
*/
extern void
lto_codegen_set_should_embed_uselists(lto_code_gen_t cg,
lto_bool_t ShouldEmbedUselists);
/**
* @}
* @defgroup LLVMCTLTO ThinLTO
* @ingroup LLVMC
*
* @{
*/
/**
* Type to wrap a single object returned by ThinLTO.
*
* \since LTO_API_VERSION=18
*/
typedef struct {
const char *Buffer;
size_t Size;
} LTOObjectBuffer;
/**
* Instantiates a ThinLTO code generator.
* Returns NULL on error (check lto_get_error_message() for details).
*
 *
 * The ThinLTOCodeGenerator is not intended to be reused for multiple
 * compilations: the model is that the client adds modules to the generator,
 * asks it to perform the ThinLTO optimizations / codegen, and finally
 * destroys the code generator.
*
* \since LTO_API_VERSION=18
*/
extern thinlto_code_gen_t thinlto_create_codegen(void);
/**
* Frees the generator and all memory it internally allocated.
* Upon return the thinlto_code_gen_t is no longer valid.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_dispose(thinlto_code_gen_t cg);
/**
* Add a module to a ThinLTO code generator. Identifier has to be unique among
* all the modules in a code generator. The data buffer stays owned by the
* client, and is expected to be available for the entire lifetime of the
* thinlto_code_gen_t it is added to.
*
 *
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_add_module(thinlto_code_gen_t cg,
const char *identifier, const char *data,
int length);
/**
* Optimize and codegen all the modules added to the codegenerator using
* ThinLTO. Resulting objects are accessible using thinlto_module_get_object().
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_process(thinlto_code_gen_t cg);
/**
* Returns the number of object files produced by the ThinLTO CodeGenerator.
*
 * It usually matches the number of input files, but this is not guaranteed by
 * the API and may change in future implementations, so clients should not
 * rely on it.
*
* \since LTO_API_VERSION=18
*/
extern unsigned int thinlto_module_get_num_objects(thinlto_code_gen_t cg);
/**
* Returns a reference to the ith object file produced by the ThinLTO
* CodeGenerator.
*
 * Clients should use \p thinlto_module_get_num_objects() to get the number of
 * available objects.
*
* \since LTO_API_VERSION=18
*/
extern LTOObjectBuffer thinlto_module_get_object(thinlto_code_gen_t cg,
unsigned int index);
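/* A minimal client-side sketch (not part of the original header): feed two
   in-memory bitcode buffers through ThinLTO and walk the resulting objects.
   The buffers must stay alive until the generator is disposed; what is done
   with each output object is left to the caller. */
static void thinlto_run(const char *id1, const char *buf1, int len1,
                        const char *id2, const char *buf2, int len2) {
  unsigned i, n;
  thinlto_code_gen_t cg = thinlto_create_codegen();
  thinlto_codegen_add_module(cg, id1, buf1, len1);
  thinlto_codegen_add_module(cg, id2, buf2, len2);
  thinlto_codegen_process(cg);
  n = thinlto_module_get_num_objects(cg);
  for (i = 0; i < n; ++i) {
    LTOObjectBuffer obj = thinlto_module_get_object(cg, i);
    /* Copy obj.Buffer (obj.Size bytes) out before disposing the generator. */
    (void)obj;
  }
  thinlto_codegen_dispose(cg);
}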
/**
* Sets which PIC code model to generate.
* Returns true on error (check lto_get_error_message() for details).
*
* \since LTO_API_VERSION=18
*/
extern lto_bool_t thinlto_codegen_set_pic_model(thinlto_code_gen_t cg,
lto_codegen_model);
/**
* @}
* @defgroup LLVMCTLTO_CACHING ThinLTO Cache Control
* @ingroup LLVMCTLTO
*
 * These entry points control the ThinLTO cache. The cache is intended to
 * support incremental builds, and thus needs to be persistent across builds.
 * The client enables the cache by supplying a path to an existing directory.
 * The code generator will use this to store object files that may be reused
 * during a subsequent build.
 * To avoid filling the disk, a few knobs are provided:
 *  - The pruning interval limits the frequency at which the garbage collector
 *    will scan the cache directory to prune expired entries.
 *    Setting it to -1 disables the pruning (default).
 *  - The pruning expiration time indicates to the garbage collector how old an
 *    entry needs to be in order to be removed.
 *  - Finally, the garbage collector can be instructed to prune the cache until
 *    the occupied space goes below a threshold.
* @{
*/
/**
 * Sets the path to a directory to use as cache storage for incremental builds.
* Setting this activates caching.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_set_cache_dir(thinlto_code_gen_t cg,
const char *cache_dir);
/**
 * Sets the cache pruning interval (in seconds). A negative value disables the
 * pruning. An unspecified default value will be applied, and a value of 0 will
 * be ignored.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_set_cache_pruning_interval(thinlto_code_gen_t cg,
int interval);
/**
 * Sets the maximum cache size that can persist across builds, expressed as a
 * percentage of the available space on the disk. Set it to 100 to indicate
 * no limit, or to 50 to indicate that the cache size will not exceed half the
 * available space. A value over 100 will be reduced to 100, and a value of 0
 * will be ignored. An unspecified default value will be applied.
*
* The formula looks like:
* AvailableSpace = FreeSpace + ExistingCacheSize
* NewCacheSize = AvailableSpace * P/100
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_set_final_cache_size_relative_to_available_space(
thinlto_code_gen_t cg, unsigned percentage);
/**
* Sets the expiration (in seconds) for an entry in the cache. An unspecified
* default value will be applied. A value of 0 will be ignored.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_set_cache_entry_expiration(thinlto_code_gen_t cg,
unsigned expiration);
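/* A minimal sketch (not part of the original header): one plausible cache
   policy for incremental builds. The directory path and all numbers are
   illustrative only. */
static void configure_thinlto_cache(thinlto_code_gen_t cg) {
  thinlto_codegen_set_cache_dir(cg, "/tmp/thinlto-cache"); /* Must exist. */
  thinlto_codegen_set_cache_pruning_interval(cg, 1200);    /* Scan at most every 20 min. */
  thinlto_codegen_set_cache_entry_expiration(cg, 7 * 24 * 3600); /* Keep entries a week. */
  /* Allow the cache to use at most 75% of (free space + current cache size). */
  thinlto_codegen_set_final_cache_size_relative_to_available_space(cg, 75);
}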
/**
* @}
*/
/**
 * Sets the path to a directory to use as storage for temporary bitcode files.
 * The intention is to make the bitcode files available for debugging at
 * various stages of the pipeline.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_set_savetemps_dir(thinlto_code_gen_t cg,
const char *save_temps_dir);
/**
* Sets the cpu to generate code for.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_set_cpu(thinlto_code_gen_t cg, const char *cpu);
/**
 * Disable CodeGen: only run the stages up to codegen, then stop. The output
 * will be bitcode.
*
* \since LTO_API_VERSION=19
*/
extern void thinlto_codegen_disable_codegen(thinlto_code_gen_t cg,
lto_bool_t disable);
/**
* Perform CodeGen only: disable all other stages.
*
* \since LTO_API_VERSION=19
*/
extern void thinlto_codegen_set_codegen_only(thinlto_code_gen_t cg,
lto_bool_t codegen_only);
/**
* Parse -mllvm style debug options.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_debug_options(const char *const *options, int number);
/**
* Test if a module has support for ThinLTO linking.
*
* \since LTO_API_VERSION=18
*/
extern lto_bool_t lto_module_is_thinlto(lto_module_t mod);
/**
* Adds a symbol to the list of global symbols that must exist in the final
* generated code. If a function is not listed there, it might be inlined into
* every usage and optimized away. For every single module, the functions
* referenced from code outside of the ThinLTO modules need to be added here.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_add_must_preserve_symbol(thinlto_code_gen_t cg,
const char *name,
int length);
/**
 * Adds a symbol to the list of global symbols that are cross-referenced
 * between ThinLTO files. If the ThinLTO CodeGenerator can ensure that every
 * reference from a ThinLTO module to this symbol is optimized away, then the
 * symbol can be discarded.
*
* \since LTO_API_VERSION=18
*/
extern void thinlto_codegen_add_cross_referenced_symbol(thinlto_code_gen_t cg,
const char *name,
int length);
#ifdef __cplusplus
}
#endif
/**
* @}
*/
#endif /* LLVM_C_LTO_H */

View File

@ -1,689 +0,0 @@
//===- llvm/ADT/APFloat.h - Arbitrary Precision Floating Point ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief
/// This file declares a class to represent arbitrary precision floating point
/// values and provide a variety of arithmetic operations on them.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_APFLOAT_H
#define LLVM_ADT_APFLOAT_H
#include "llvm/ADT/APInt.h"
namespace llvm {
struct fltSemantics;
class APSInt;
class StringRef;
template <typename T> class SmallVectorImpl;
/// Enum that represents what fraction of the LSB the truncated bits of an FP
/// number represent.
///
/// This essentially combines the roles of guard and sticky bits.
enum lostFraction { // Example of truncated bits:
lfExactlyZero, // 000000
lfLessThanHalf, // 0xxxxx x's not all zero
lfExactlyHalf, // 100000
lfMoreThanHalf // 1xxxxx x's not all zero
};
/// \brief A self-contained host- and target-independent arbitrary-precision
/// floating-point software implementation.
///
/// APFloat uses bignum integer arithmetic as provided by static functions in
/// the APInt class. The library will work with bignum integers whose parts are
/// any unsigned type at least 16 bits wide, but 64 bits is recommended.
///
/// Written for clarity rather than speed, in particular with a view to use in
/// the front-end of a cross compiler so that target arithmetic can be correctly
/// performed on the host. Performance should nonetheless be reasonable,
/// particularly for its intended use. It may be useful as a base
/// implementation for a run-time library during development of a faster
/// target-specific one.
///
/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
/// implemented operations. Currently implemented operations are add, subtract,
/// multiply, divide, fused-multiply-add, conversion-to-float,
/// conversion-to-integer and conversion-from-integer. New rounding modes
/// (e.g. away from zero) can be added with three or four lines of code.
///
/// Four formats are built-in: IEEE single precision, double precision,
/// quadruple precision, and x87 80-bit extended double (when operating with
/// full extended precision). Adding a new format that obeys IEEE semantics
/// only requires adding two lines of code: a declaration and definition of the
/// format.
///
/// All operations return the status of that operation as an exception bit-mask,
/// so multiple operations can be done consecutively with their results or-ed
/// together. The returned status can be useful for compiler diagnostics; e.g.,
/// inexact, underflow and overflow can be easily diagnosed on constant folding,
/// and compiler optimizers can determine what exceptions would be raised by
/// folding operations and optimize, or perhaps not optimize, accordingly.
///
/// At present, underflow tininess is detected after rounding; it should be
/// straightforward to add support for the before-rounding case too.
///
/// The library reads hexadecimal floating point numbers as per C99, and
/// correctly rounds if necessary according to the specified rounding mode.
/// Syntax is required to have been validated by the caller. It also converts
/// floating point numbers to hexadecimal text as per the C99 %a and %A
/// conversions. The output precision (or alternatively the natural minimal
/// precision) can be specified; if the requested precision is less than the
/// natural precision the output is correctly rounded for the specified rounding
/// mode.
///
/// It also reads decimal floating point numbers and correctly rounds according
/// to the specified rounding mode.
///
/// Conversion to decimal text is not currently implemented.
///
/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
/// signed exponent, and the significand as an array of integer parts. After
/// normalization of a number of precision P the exponent is within the range of
/// the format, and if the number is not denormal the P-th bit of the
/// significand is set as an explicit integer bit. For denormals the most
/// significant bit is shifted right so that the exponent is maintained at the
/// format's minimum, so that the smallest denormal has just the least
/// significant bit of the significand set. The sign of zeroes and infinities
/// is significant; the exponent and significand of such numbers is not stored,
/// but has a known implicit (deterministic) value: 0 for the significands, 0
/// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and
/// significand are deterministic, although not really meaningful, and preserved
/// in non-conversion operations. The exponent is implicitly all 1 bits.
///
/// APFloat does not provide any exception handling beyond default exception
/// handling. We represent Signaling NaNs via the IEEE-754R 2008 6.2.1 "should"
/// clause, by encoding Signaling NaNs with the first bit of their trailing
/// significand as 0.
///
/// TODO
/// ====
///
/// Some features that may or may not be worth adding:
///
/// Binary to decimal conversion (hard).
///
/// Optional ability to detect underflow tininess before rounding.
///
/// New formats: x87 in single and double precision mode (IEEE apart from
/// extended exponent range) (hard).
///
/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
///
class APFloat {
public:
/// A signed type to represent a floating point number's unbiased exponent.
typedef signed short ExponentType;
/// \name Floating Point Semantics.
/// @{
static const fltSemantics IEEEhalf;
static const fltSemantics IEEEsingle;
static const fltSemantics IEEEdouble;
static const fltSemantics IEEEquad;
static const fltSemantics PPCDoubleDouble;
static const fltSemantics x87DoubleExtended;
/// A pseudo fltSemantics used to construct APFloats that cannot conflict with
/// anything real.
static const fltSemantics Bogus;
/// @}
static unsigned int semanticsPrecision(const fltSemantics &);
static ExponentType semanticsMinExponent(const fltSemantics &);
static ExponentType semanticsMaxExponent(const fltSemantics &);
static unsigned int semanticsSizeInBits(const fltSemantics &);
/// IEEE-754R 5.11: Floating Point Comparison Relations.
enum cmpResult {
cmpLessThan,
cmpEqual,
cmpGreaterThan,
cmpUnordered
};
/// IEEE-754R 4.3: Rounding-direction attributes.
enum roundingMode {
rmNearestTiesToEven,
rmTowardPositive,
rmTowardNegative,
rmTowardZero,
rmNearestTiesToAway
};
/// IEEE-754R 7: Default exception handling.
///
/// opUnderflow or opOverflow are always returned or-ed with opInexact.
enum opStatus {
opOK = 0x00,
opInvalidOp = 0x01,
opDivByZero = 0x02,
opOverflow = 0x04,
opUnderflow = 0x08,
opInexact = 0x10
};
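  // A minimal sketch (not part of the original header): as the comment above
  // says, clients can or the returned statuses together across a chain of
  // operations and test the accumulated mask once at the end. Written as
  // commented client-side C++ so the class definition stays intact.
  //
  //   #include "llvm/ADT/APFloat.h"
  //   using llvm::APFloat;
  //
  //   static bool accumulate(APFloat &Acc, const APFloat &X, const APFloat &Y) {
  //     unsigned Status = APFloat::opOK;
  //     Status |= Acc.add(X, APFloat::rmNearestTiesToEven);
  //     Status |= Acc.multiply(Y, APFloat::rmNearestTiesToEven);
  //     // Overflow/underflow arrive or-ed with opInexact, per the note above.
  //     return (Status & (APFloat::opOverflow | APFloat::opUnderflow)) == 0;
  //   }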
/// Category of internally-represented number.
enum fltCategory {
fcInfinity,
fcNaN,
fcNormal,
fcZero
};
/// Convenience enum used to construct an uninitialized APFloat.
enum uninitializedTag {
uninitialized
};
/// \name Constructors
/// @{
APFloat(const fltSemantics &); // Default construct to 0.0
APFloat(const fltSemantics &, StringRef);
APFloat(const fltSemantics &, integerPart);
APFloat(const fltSemantics &, uninitializedTag);
APFloat(const fltSemantics &, const APInt &);
explicit APFloat(double d);
explicit APFloat(float f);
APFloat(const APFloat &);
APFloat(APFloat &&);
~APFloat();
/// @}
/// \brief Returns whether this instance allocated memory.
bool needsCleanup() const { return partCount() > 1; }
/// \name Convenience "constructors"
/// @{
/// Factory for Positive and Negative Zero.
///
/// \param Negative True iff the number should be negative.
static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
APFloat Val(Sem, uninitialized);
Val.makeZero(Negative);
return Val;
}
/// Factory for Positive and Negative Infinity.
///
/// \param Negative True iff the number should be negative.
static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
APFloat Val(Sem, uninitialized);
Val.makeInf(Negative);
return Val;
}
/// Factory for QNaN values.
///
/// \param Negative - True iff the NaN generated should be negative.
/// \param type - The unspecified fill bits for creating the NaN, 0 by
/// default. The value is truncated as necessary.
static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
unsigned type = 0) {
if (type) {
APInt fill(64, type);
return getQNaN(Sem, Negative, &fill);
} else {
return getQNaN(Sem, Negative, nullptr);
}
}
/// Factory for QNaN values.
static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
const APInt *payload = nullptr) {
return makeNaN(Sem, false, Negative, payload);
}
/// Factory for SNaN values.
static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
const APInt *payload = nullptr) {
return makeNaN(Sem, true, Negative, payload);
}
/// Returns the largest finite number in the given semantics.
///
/// \param Negative - True iff the number should be negative
static APFloat getLargest(const fltSemantics &Sem, bool Negative = false);
/// Returns the smallest (by magnitude) finite number in the given semantics.
/// Might be denormalized, which implies a relative loss of precision.
///
/// \param Negative - True iff the number should be negative
static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false);
/// Returns the smallest (by magnitude) normalized finite number in the given
/// semantics.
///
/// \param Negative - True iff the number should be negative
static APFloat getSmallestNormalized(const fltSemantics &Sem,
bool Negative = false);
/// Returns a float which is bitcast from an all-ones integer value.
///
/// \param BitWidth - Select float type
/// \param isIEEE - If 128 bit number, select between PPC and IEEE
static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
/// Returns the size of the floating point number (in bits) in the given
/// semantics.
static unsigned getSizeInBits(const fltSemantics &Sem);
/// @}
/// Used to insert APFloat objects, or objects that contain APFloat objects,
/// into FoldingSets.
void Profile(FoldingSetNodeID &NID) const;
/// \name Arithmetic
/// @{
opStatus add(const APFloat &, roundingMode);
opStatus subtract(const APFloat &, roundingMode);
opStatus multiply(const APFloat &, roundingMode);
opStatus divide(const APFloat &, roundingMode);
/// IEEE remainder.
opStatus remainder(const APFloat &);
/// C fmod, or llvm frem.
opStatus mod(const APFloat &);
opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode);
opStatus roundToIntegral(roundingMode);
/// IEEE-754R 5.3.1: nextUp/nextDown.
opStatus next(bool nextDown);
/// \brief Operator+ overload which provides the default
/// \c rmNearestTiesToEven rounding mode and *no* error checking.
APFloat operator+(const APFloat &RHS) const {
APFloat Result = *this;
Result.add(RHS, rmNearestTiesToEven);
return Result;
}
/// \brief Operator- overload which provides the default
/// \c rmNearestTiesToEven rounding mode and *no* error checking.
APFloat operator-(const APFloat &RHS) const {
APFloat Result = *this;
Result.subtract(RHS, rmNearestTiesToEven);
return Result;
}
/// \brief Operator* overload which provides the default
/// \c rmNearestTiesToEven rounding mode and *no* error checking.
APFloat operator*(const APFloat &RHS) const {
APFloat Result = *this;
Result.multiply(RHS, rmNearestTiesToEven);
return Result;
}
/// \brief Operator/ overload which provides the default
/// \c rmNearestTiesToEven rounding mode and *no* error checking.
APFloat operator/(const APFloat &RHS) const {
APFloat Result = *this;
Result.divide(RHS, rmNearestTiesToEven);
return Result;
}
/// @}
/// \name Sign operations.
/// @{
void changeSign();
void clearSign();
void copySign(const APFloat &);
/// \brief A static helper to produce a copy of an APFloat value with its sign
/// copied from some other APFloat.
static APFloat copySign(APFloat Value, const APFloat &Sign) {
Value.copySign(Sign);
return Value;
}
/// @}
/// \name Conversions
/// @{
opStatus convert(const fltSemantics &, roundingMode, bool *);
opStatus convertToInteger(integerPart *, unsigned int, bool, roundingMode,
bool *) const;
opStatus convertToInteger(APSInt &, roundingMode, bool *) const;
opStatus convertFromAPInt(const APInt &, bool, roundingMode);
opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int,
bool, roundingMode);
opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
bool, roundingMode);
opStatus convertFromString(StringRef, roundingMode);
APInt bitcastToAPInt() const;
double convertToDouble() const;
float convertToFloat() const;
/// @}
/// The definition of equality is not straightforward for floating point, so
/// we won't use operator==. Use one of the following, or write whatever it
/// is you really mean.
bool operator==(const APFloat &) const = delete;
/// IEEE comparison with another floating point number (NaNs compare
/// unordered, 0==-0).
cmpResult compare(const APFloat &) const;
/// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
bool bitwiseIsEqual(const APFloat &) const;
/// Write out a hexadecimal representation of the floating point value to DST,
/// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d.
/// Return the number of characters written, excluding the terminating NUL.
unsigned int convertToHexString(char *dst, unsigned int hexDigits,
bool upperCase, roundingMode) const;
/// \name IEEE-754R 5.7.2 General operations.
/// @{
/// IEEE-754R isSignMinus: Returns true if and only if the current value is
/// negative.
///
/// This applies to zeros and NaNs as well.
bool isNegative() const { return sign; }
/// IEEE-754R isNormal: Returns true if and only if the current value is normal.
///
/// This implies that the current value of the float is not zero, subnormal,
/// infinite, or NaN following the definition of normality from IEEE-754R.
bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
/// Returns true if and only if the current value is zero, subnormal, or
/// normal.
///
/// This means that the value is not infinite or NaN.
bool isFinite() const { return !isNaN() && !isInfinity(); }
/// Returns true if and only if the float is plus or minus zero.
bool isZero() const { return category == fcZero; }
/// IEEE-754R isSubnormal(): Returns true if and only if the float is a
/// denormal.
bool isDenormal() const;
/// IEEE-754R isInfinite(): Returns true if and only if the float is infinity.
bool isInfinity() const { return category == fcInfinity; }
/// Returns true if and only if the float is a quiet or signaling NaN.
bool isNaN() const { return category == fcNaN; }
/// Returns true if and only if the float is a signaling NaN.
bool isSignaling() const;
/// @}
/// \name Simple Queries
/// @{
fltCategory getCategory() const { return category; }
const fltSemantics &getSemantics() const { return *semantics; }
bool isNonZero() const { return category != fcZero; }
bool isFiniteNonZero() const { return isFinite() && !isZero(); }
bool isPosZero() const { return isZero() && !isNegative(); }
bool isNegZero() const { return isZero() && isNegative(); }
/// Returns true if and only if the number has the smallest possible non-zero
/// magnitude in the current semantics.
bool isSmallest() const;
/// Returns true if and only if the number has the largest possible finite
/// magnitude in the current semantics.
bool isLargest() const;
/// Returns true if and only if the number is an exact integer.
bool isInteger() const;
/// @}
APFloat &operator=(const APFloat &);
APFloat &operator=(APFloat &&);
/// \brief Overload to compute a hash code for an APFloat value.
///
/// Note that the use of hash codes for floating point values is in general
/// fraught with peril. Equality is hard to define for these values. For
/// example, should negative and positive zero hash to different codes? Are
/// they equal or not? This hash value implementation specifically
/// emphasizes producing different codes for different inputs in order to
/// be used in canonicalization and memoization. As such, equality is
/// bitwiseIsEqual, and 0 != -0.
friend hash_code hash_value(const APFloat &Arg);
/// Converts this value into a decimal string.
///
/// \param FormatPrecision The maximum number of digits of
/// precision to output. If there are fewer digits available,
/// zero padding will not be used unless the value is
/// integral and small enough to be expressed in
/// FormatPrecision digits. 0 means to use the natural
/// precision of the number.
/// \param FormatMaxPadding The maximum number of zeros to
/// consider inserting before falling back to scientific
/// notation. 0 means to always use scientific notation.
///
/// Number Precision MaxPadding Result
/// ------ --------- ---------- ------
/// 1.01E+4 5 2 10100
/// 1.01E+4 4 2 1.01E+4
/// 1.01E+4 5 1 1.01E+4
/// 1.01E-2 5 2 0.0101
/// 1.01E-2 4 2 0.0101
/// 1.01E-2 4 1 1.01E-2
void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
unsigned FormatMaxPadding = 3) const;
/// If this value has an exact multiplicative inverse, store it in inv and
/// return true.
bool getExactInverse(APFloat *inv) const;
/// \brief Enumeration of \c ilogb error results.
enum IlogbErrorKinds {
IEK_Zero = INT_MIN+1,
IEK_NaN = INT_MIN,
IEK_Inf = INT_MAX
};
/// \brief Returns the exponent of the internal representation of the APFloat.
///
/// Because the radix of APFloat is 2, this is equivalent to floor(log2(x)).
/// For special APFloat values, this returns special error codes:
///
/// NaN -> \c IEK_NaN
/// 0 -> \c IEK_Zero
/// Inf -> \c IEK_Inf
///
friend int ilogb(const APFloat &Arg);
/// \brief Returns: X * 2^Exp for integral exponents.
friend APFloat scalbn(APFloat X, int Exp, roundingMode);
friend APFloat frexp(const APFloat &X, int &Exp, roundingMode);
private:
/// \name Simple Queries
/// @{
integerPart *significandParts();
const integerPart *significandParts() const;
unsigned int partCount() const;
/// @}
/// \name Significand operations.
/// @{
integerPart addSignificand(const APFloat &);
integerPart subtractSignificand(const APFloat &, integerPart);
lostFraction addOrSubtractSignificand(const APFloat &, bool subtract);
lostFraction multiplySignificand(const APFloat &, const APFloat *);
lostFraction divideSignificand(const APFloat &);
void incrementSignificand();
void initialize(const fltSemantics *);
void shiftSignificandLeft(unsigned int);
lostFraction shiftSignificandRight(unsigned int);
unsigned int significandLSB() const;
unsigned int significandMSB() const;
void zeroSignificand();
/// Return true if the significand excluding the integral bit is all ones.
bool isSignificandAllOnes() const;
/// Return true if the significand excluding the integral bit is all zeros.
bool isSignificandAllZeros() const;
/// @}
/// \name Arithmetic on special values.
/// @{
opStatus addOrSubtractSpecials(const APFloat &, bool subtract);
opStatus divideSpecials(const APFloat &);
opStatus multiplySpecials(const APFloat &);
opStatus modSpecials(const APFloat &);
/// @}
/// \name Special value setters.
/// @{
void makeLargest(bool Neg = false);
void makeSmallest(bool Neg = false);
void makeNaN(bool SNaN = false, bool Neg = false,
const APInt *fill = nullptr);
static APFloat makeNaN(const fltSemantics &Sem, bool SNaN, bool Negative,
const APInt *fill);
void makeInf(bool Neg = false);
void makeZero(bool Neg = false);
void makeQuiet();
/// @}
/// \name Miscellany
/// @{
bool convertFromStringSpecials(StringRef str);
opStatus normalize(roundingMode, lostFraction);
opStatus addOrSubtract(const APFloat &, roundingMode, bool subtract);
cmpResult compareAbsoluteValue(const APFloat &) const;
opStatus handleOverflow(roundingMode);
bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
opStatus convertToSignExtendedInteger(integerPart *, unsigned int, bool,
roundingMode, bool *) const;
opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
roundingMode);
opStatus convertFromHexadecimalString(StringRef, roundingMode);
opStatus convertFromDecimalString(StringRef, roundingMode);
char *convertNormalToHexString(char *, unsigned int, bool,
roundingMode) const;
opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int,
roundingMode);
/// @}
APInt convertHalfAPFloatToAPInt() const;
APInt convertFloatAPFloatToAPInt() const;
APInt convertDoubleAPFloatToAPInt() const;
APInt convertQuadrupleAPFloatToAPInt() const;
APInt convertF80LongDoubleAPFloatToAPInt() const;
APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
void initFromAPInt(const fltSemantics *Sem, const APInt &api);
void initFromHalfAPInt(const APInt &api);
void initFromFloatAPInt(const APInt &api);
void initFromDoubleAPInt(const APInt &api);
void initFromQuadrupleAPInt(const APInt &api);
void initFromF80LongDoubleAPInt(const APInt &api);
void initFromPPCDoubleDoubleAPInt(const APInt &api);
void assign(const APFloat &);
void copySignificand(const APFloat &);
void freeSignificand();
/// The semantics that this value obeys.
const fltSemantics *semantics;
/// A binary fraction with an explicit integer bit.
///
/// The significand must be at least one bit wider than the target precision.
union Significand {
integerPart part;
integerPart *parts;
} significand;
/// The signed unbiased exponent of the value.
ExponentType exponent;
/// What kind of floating point number this is.
///
/// Only 2 bits are required, but Visual Studio incorrectly sign extends it.
/// Using the extra bit keeps it from failing under Visual Studio.
fltCategory category : 3;
/// Sign bit of the number.
unsigned int sign : 1;
};
/// See friend declarations above.
///
/// These additional declarations are required in order to compile LLVM with
/// the IBM xlC compiler.
hash_code hash_value(const APFloat &Arg);
int ilogb(const APFloat &Arg);
APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode);
/// \brief Equivalent of C standard library function.
///
/// While the C standard says Exp is an unspecified value for infinity and NaN,
/// this returns INT_MAX for infinities and INT_MIN for NaNs.
APFloat frexp(const APFloat &Val, int &Exp, APFloat::roundingMode RM);
/// \brief Returns the absolute value of the argument.
inline APFloat abs(APFloat X) {
X.clearSign();
return X;
}
/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
LLVM_READONLY
inline APFloat minnum(const APFloat &A, const APFloat &B) {
if (A.isNaN())
return B;
if (B.isNaN())
return A;
return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
}
/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
LLVM_READONLY
inline APFloat maxnum(const APFloat &A, const APFloat &B) {
if (A.isNaN())
return B;
if (B.isNaN())
return A;
return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
}
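// An illustrative sketch (not part of the original header) of the NaN
// behavior described above:
// \code
//   APFloat N = APFloat::getNaN(APFloat::IEEEdouble);
//   APFloat One(1.0);
//   minnum(N, One); // 1.0: the NaN operand is ignored
//   maxnum(One, N); // 1.0 as well
// \endcode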
} // namespace llvm
#endif // LLVM_ADT_APFLOAT_H

File diff suppressed because it is too large

View File

@ -1,342 +0,0 @@
//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the APSInt class, which is a simple class that
// represents an arbitrary sized integer that knows its signedness.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_APSINT_H
#define LLVM_ADT_APSINT_H
#include "llvm/ADT/APInt.h"
namespace llvm {
class APSInt : public APInt {
bool IsUnsigned;
public:
/// Default constructor that creates an uninitialized APInt.
explicit APSInt() : IsUnsigned(false) {}
/// APSInt ctor - Create an APSInt with the specified width, default to
/// unsigned.
explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
: APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}
explicit APSInt(APInt I, bool isUnsigned = true)
: APInt(std::move(I)), IsUnsigned(isUnsigned) {}
/// Construct an APSInt from a string representation.
///
/// This constructor interprets the string \p Str using the radix of 10.
/// The interpretation stops at the end of the string. The bit width of the
/// constructed APSInt is determined automatically.
///
/// \param Str the string to be interpreted.
explicit APSInt(StringRef Str);
APSInt &operator=(APInt RHS) {
// Retain our current sign.
APInt::operator=(std::move(RHS));
return *this;
}
APSInt &operator=(uint64_t RHS) {
// Retain our current sign.
APInt::operator=(RHS);
return *this;
}
// Query sign information.
bool isSigned() const { return !IsUnsigned; }
bool isUnsigned() const { return IsUnsigned; }
void setIsUnsigned(bool Val) { IsUnsigned = Val; }
void setIsSigned(bool Val) { IsUnsigned = !Val; }
/// toString - Append this APSInt to the specified SmallString.
void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
APInt::toString(Str, Radix, isSigned());
}
/// toString - Converts an APSInt to a std::string. This is an inefficient
/// method; you should prefer passing in a SmallString instead.
std::string toString(unsigned Radix) const {
return APInt::toString(Radix, isSigned());
}
using APInt::toString;
/// \brief Get the correctly-extended \c int64_t value.
int64_t getExtValue() const {
assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
return isSigned() ? getSExtValue() : getZExtValue();
}
APSInt LLVM_ATTRIBUTE_UNUSED_RESULT trunc(uint32_t width) const {
return APSInt(APInt::trunc(width), IsUnsigned);
}
APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extend(uint32_t width) const {
if (IsUnsigned)
return APSInt(zext(width), IsUnsigned);
else
return APSInt(sext(width), IsUnsigned);
}
APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extOrTrunc(uint32_t width) const {
if (IsUnsigned)
return APSInt(zextOrTrunc(width), IsUnsigned);
else
return APSInt(sextOrTrunc(width), IsUnsigned);
}
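// An illustrative sketch (not part of the original header): extension
// follows the stored signedness (zext when unsigned, sext when signed).
// \code
//   APSInt S(APInt(8, 0xFF), /*isUnsigned=*/false); // -1 as a signed i8
//   APSInt U(APInt(8, 0xFF), /*isUnsigned=*/true);  // 255 as an unsigned i8
//   S.extend(16); // 0xFFFF (sign extension)
//   U.extend(16); // 0x00FF (zero extension)
// \endcode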
const APSInt &operator%=(const APSInt &RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
if (IsUnsigned)
*this = urem(RHS);
else
*this = srem(RHS);
return *this;
}
const APSInt &operator/=(const APSInt &RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
if (IsUnsigned)
*this = udiv(RHS);
else
*this = sdiv(RHS);
return *this;
}
APSInt operator%(const APSInt &RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
}
APSInt operator/(const APSInt &RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
}
APSInt operator>>(unsigned Amt) const {
return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
}
APSInt& operator>>=(unsigned Amt) {
*this = *this >> Amt;
return *this;
}
inline bool operator<(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? ult(RHS) : slt(RHS);
}
inline bool operator>(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? ugt(RHS) : sgt(RHS);
}
inline bool operator<=(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? ule(RHS) : sle(RHS);
}
inline bool operator>=(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? uge(RHS) : sge(RHS);
}
inline bool operator==(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return eq(RHS);
}
inline bool operator!=(const APSInt& RHS) const {
return !((*this) == RHS);
}
bool operator==(int64_t RHS) const {
return compareValues(*this, get(RHS)) == 0;
}
bool operator!=(int64_t RHS) const {
return compareValues(*this, get(RHS)) != 0;
}
bool operator<=(int64_t RHS) const {
return compareValues(*this, get(RHS)) <= 0;
}
bool operator>=(int64_t RHS) const {
return compareValues(*this, get(RHS)) >= 0;
}
bool operator<(int64_t RHS) const {
return compareValues(*this, get(RHS)) < 0;
}
bool operator>(int64_t RHS) const {
return compareValues(*this, get(RHS)) > 0;
}
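// An illustrative sketch (not part of the original header): comparisons with
// int64_t promote both sides via get(), so the bit widths need not match.
// \code
//   APSInt X(APInt(16, 9), /*isUnsigned=*/false);
//   X == 9;  // true
//   X < 10;  // true
// \endcode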
// The remaining operators just wrap the logic of APInt, but retain the
// signedness information.
APSInt operator<<(unsigned Bits) const {
return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
}
APSInt& operator<<=(unsigned Amt) {
*this = *this << Amt;
return *this;
}
APSInt& operator++() {
++(static_cast<APInt&>(*this));
return *this;
}
APSInt& operator--() {
--(static_cast<APInt&>(*this));
return *this;
}
APSInt operator++(int) {
return APSInt(++static_cast<APInt&>(*this), IsUnsigned);
}
APSInt operator--(int) {
return APSInt(--static_cast<APInt&>(*this), IsUnsigned);
}
APSInt operator-() const {
return APSInt(-static_cast<const APInt&>(*this), IsUnsigned);
}
APSInt& operator+=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) += RHS;
return *this;
}
APSInt& operator-=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) -= RHS;
return *this;
}
APSInt& operator*=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) *= RHS;
return *this;
}
APSInt& operator&=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) &= RHS;
return *this;
}
APSInt& operator|=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) |= RHS;
return *this;
}
APSInt& operator^=(const APSInt& RHS) {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
static_cast<APInt&>(*this) ^= RHS;
return *this;
}
APSInt operator&(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
}
APSInt LLVM_ATTRIBUTE_UNUSED_RESULT And(const APSInt& RHS) const {
return this->operator&(RHS);
}
APSInt operator|(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
}
APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Or(const APSInt& RHS) const {
return this->operator|(RHS);
}
APSInt operator^(const APSInt &RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
}
APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Xor(const APSInt& RHS) const {
return this->operator^(RHS);
}
APSInt operator*(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned);
}
APSInt operator+(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned);
}
APSInt operator-(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned);
}
APSInt operator~() const {
return APSInt(~static_cast<const APInt&>(*this), IsUnsigned);
}
/// getMaxValue - Return the APSInt representing the maximum integer value
/// with the given bit width and signedness.
static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
return APSInt(Unsigned ? APInt::getMaxValue(numBits)
: APInt::getSignedMaxValue(numBits), Unsigned);
}
/// getMinValue - Return the APSInt representing the minimum integer value
/// with the given bit width and signedness.
static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
return APSInt(Unsigned ? APInt::getMinValue(numBits)
: APInt::getSignedMinValue(numBits), Unsigned);
}
/// \brief Determine if two APSInts have the same value, zero- or
/// sign-extending as needed.
static bool isSameValue(const APSInt &I1, const APSInt &I2) {
return !compareValues(I1, I2);
}
/// \brief Compare underlying values of two numbers.
static int compareValues(const APSInt &I1, const APSInt &I2) {
if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
return I1 == I2 ? 0 : I1 > I2 ? 1 : -1;
// Check for a bit-width mismatch.
if (I1.getBitWidth() > I2.getBitWidth())
return compareValues(I1, I2.extend(I1.getBitWidth()));
else if (I2.getBitWidth() > I1.getBitWidth())
return compareValues(I1.extend(I2.getBitWidth()), I2);
// We have a signedness mismatch. Check for negative values and do an
// unsigned compare if both are positive.
if (I1.isSigned()) {
assert(!I2.isSigned() && "Expected signed mismatch");
if (I1.isNegative())
return -1;
} else {
assert(I2.isSigned() && "Expected signed mismatch");
if (I2.isNegative())
return 1;
}
return I1.eq(I2) ? 0 : I1.ugt(I2) ? 1 : -1;
}
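// An illustrative sketch (not part of the original header): value equality
// across differing widths and signedness.
// \code
//   APSInt A(APInt(8, 255), /*isUnsigned=*/true);   // 255
//   APSInt B(APInt(32, 255), /*isUnsigned=*/false); // 255
//   APSInt C(APInt(8, 255), /*isUnsigned=*/false);  // -1
//   APSInt::isSameValue(A, B); // true
//   APSInt::isSameValue(A, C); // false
// \endcode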
static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }
static APSInt getUnsigned(uint64_t X) { return APSInt(APInt(64, X), true); }
/// Profile - Used to insert APSInt objects, or objects that contain APSInt
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& ID) const;
};
inline bool operator==(int64_t V1, const APSInt &V2) { return V2 == V1; }
inline bool operator!=(int64_t V1, const APSInt &V2) { return V2 != V1; }
inline bool operator<=(int64_t V1, const APSInt &V2) { return V2 >= V1; }
inline bool operator>=(int64_t V1, const APSInt &V2) { return V2 <= V1; }
inline bool operator<(int64_t V1, const APSInt &V2) { return V2 > V1; }
inline bool operator>(int64_t V1, const APSInt &V2) { return V2 < V1; }
inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
I.print(OS, I.isSigned());
return OS;
}
} // end namespace llvm
#endif

View File

@ -1,396 +0,0 @@
//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ARRAYREF_H
#define LLVM_ADT_ARRAYREF_H
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>
namespace llvm {
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
///
/// This class does not own the underlying data, it is expected to be used in
/// situations where the data resides in some other buffer, whose lifetime
/// extends past that of the ArrayRef. For this reason, it is not in general
/// safe to store an ArrayRef.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template<typename T>
class ArrayRef {
public:
typedef const T *iterator;
typedef const T *const_iterator;
typedef size_t size_type;
typedef std::reverse_iterator<iterator> reverse_iterator;
private:
/// The start of the array, in an external buffer.
const T *Data;
/// The number of elements.
size_type Length;
public:
/// @name Constructors
/// @{
/// Construct an empty ArrayRef.
/*implicit*/ ArrayRef() : Data(nullptr), Length(0) {}
/// Construct an empty ArrayRef from None.
/*implicit*/ ArrayRef(NoneType) : Data(nullptr), Length(0) {}
/// Construct an ArrayRef from a single element.
/*implicit*/ ArrayRef(const T &OneElt)
: Data(&OneElt), Length(1) {}
/// Construct an ArrayRef from a pointer and length.
/*implicit*/ ArrayRef(const T *data, size_t length)
: Data(data), Length(length) {}
/// Construct an ArrayRef from a range.
ArrayRef(const T *begin, const T *end)
: Data(begin), Length(end - begin) {}
/// Construct an ArrayRef from a SmallVector. This is templated in order to
/// avoid instantiating SmallVectorTemplateCommon<T> whenever we
/// copy-construct an ArrayRef.
template<typename U>
/*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
: Data(Vec.data()), Length(Vec.size()) {
}
/// Construct an ArrayRef from a std::vector.
template<typename A>
/*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a C array.
template <size_t N>
/*implicit*/ LLVM_CONSTEXPR ArrayRef(const T (&Arr)[N])
: Data(Arr), Length(N) {}
/// Construct an ArrayRef from a std::initializer_list.
/*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
: Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()),
Length(Vec.size()) {}
/// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
/// ensure that only ArrayRefs of pointers can be converted.
template <typename U>
ArrayRef(
const ArrayRef<U *> &A,
typename std::enable_if<
std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
: Data(A.data()), Length(A.size()) {}
/// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
/// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
/// whenever we copy-construct an ArrayRef.
template<typename U, typename DummyT>
/*implicit*/ ArrayRef(
const SmallVectorTemplateCommon<U *, DummyT> &Vec,
typename std::enable_if<
std::is_convertible<U *const *, T const *>::value>::type * = nullptr)
: Data(Vec.data()), Length(Vec.size()) {
}
/// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
/// to ensure that only vectors of pointers can be converted.
template<typename U, typename A>
ArrayRef(const std::vector<U *, A> &Vec,
typename std::enable_if<
std::is_convertible<U *const *, T const *>::value>::type* = 0)
: Data(Vec.data()), Length(Vec.size()) {}
/// @}
/// @name Simple Operations
/// @{
iterator begin() const { return Data; }
iterator end() const { return Data + Length; }
reverse_iterator rbegin() const { return reverse_iterator(end()); }
reverse_iterator rend() const { return reverse_iterator(begin()); }
/// empty - Check if the array is empty.
bool empty() const { return Length == 0; }
const T *data() const { return Data; }
/// size - Get the array size.
size_t size() const { return Length; }
/// front - Get the first element.
const T &front() const {
assert(!empty());
return Data[0];
}
/// back - Get the last element.
const T &back() const {
assert(!empty());
return Data[Length-1];
}
/// copy - Allocate copy in Allocator and return ArrayRef<T> to it.
template <typename Allocator> ArrayRef<T> copy(Allocator &A) {
T *Buff = A.template Allocate<T>(Length);
std::uninitialized_copy(begin(), end(), Buff);
return ArrayRef<T>(Buff, Length);
}
/// equals - Check for element-wise equality.
bool equals(ArrayRef RHS) const {
if (Length != RHS.Length)
return false;
return std::equal(begin(), end(), RHS.begin());
}
/// slice(n) - Chop off the first N elements of the array.
ArrayRef<T> slice(size_t N) const {
assert(N <= size() && "Invalid specifier");
return ArrayRef<T>(data()+N, size()-N);
}
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
ArrayRef<T> slice(size_t N, size_t M) const {
assert(N+M <= size() && "Invalid specifier");
return ArrayRef<T>(data()+N, M);
}
/// \brief Drop the first \p N elements of the array.
ArrayRef<T> drop_front(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(N, size() - N);
}
/// \brief Drop the last \p N elements of the array.
ArrayRef<T> drop_back(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(0, size() - N);
}
/// @}
/// @name Operator Overloads
/// @{
const T &operator[](size_t Index) const {
assert(Index < Length && "Invalid index!");
return Data[Index];
}
/// @}
/// @name Expensive Operations
/// @{
std::vector<T> vec() const {
return std::vector<T>(Data, Data+Length);
}
/// @}
/// @name Conversion operators
/// @{
operator std::vector<T>() const {
return std::vector<T>(Data, Data+Length);
}
/// @}
};
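// An illustrative sketch (not part of the original header): one signature
// accepts many contiguous containers, with no copy made.
// \code
//   int sum(ArrayRef<int> Xs) {
//     int S = 0;
//     for (int X : Xs)
//       S += X;
//     return S;
//   }
//   // sum(7);                           // single element
//   // int A[] = {1, 2, 3}; sum(A);      // C array
//   // std::vector<int> V{4, 5}; sum(V); // std::vector
// \endcode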
/// MutableArrayRef - Represent a mutable reference to an array (0 or more
/// elements consecutively in memory), i.e. a start pointer and a length. It
/// allows various APIs to take and modify consecutive elements easily and
/// conveniently.
///
/// This class does not own the underlying data, it is expected to be used in
/// situations where the data resides in some other buffer, whose lifetime
/// extends past that of the MutableArrayRef. For this reason, it is not in
/// general safe to store a MutableArrayRef.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template<typename T>
class MutableArrayRef : public ArrayRef<T> {
public:
typedef T *iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
/// Construct an empty MutableArrayRef.
/*implicit*/ MutableArrayRef() : ArrayRef<T>() {}
/// Construct an empty MutableArrayRef from None.
/*implicit*/ MutableArrayRef(NoneType) : ArrayRef<T>() {}
/// Construct a MutableArrayRef from a single element.
/*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}
/// Construct a MutableArrayRef from a pointer and length.
/*implicit*/ MutableArrayRef(T *data, size_t length)
: ArrayRef<T>(data, length) {}
/// Construct a MutableArrayRef from a range.
MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}
/// Construct an MutableArrayRef from a SmallVector.
/*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
: ArrayRef<T>(Vec) {}
/// Construct a MutableArrayRef from a std::vector.
/*implicit*/ MutableArrayRef(std::vector<T> &Vec)
: ArrayRef<T>(Vec) {}
/// Construct a MutableArrayRef from a C array.
template <size_t N>
/*implicit*/ LLVM_CONSTEXPR MutableArrayRef(T (&Arr)[N])
: ArrayRef<T>(Arr) {}
T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
iterator begin() const { return data(); }
iterator end() const { return data() + this->size(); }
reverse_iterator rbegin() const { return reverse_iterator(end()); }
reverse_iterator rend() const { return reverse_iterator(begin()); }
/// front - Get the first element.
T &front() const {
assert(!this->empty());
return data()[0];
}
/// back - Get the last element.
T &back() const {
assert(!this->empty());
return data()[this->size()-1];
}
/// slice(n) - Chop off the first N elements of the array.
MutableArrayRef<T> slice(size_t N) const {
assert(N <= this->size() && "Invalid specifier");
return MutableArrayRef<T>(data()+N, this->size()-N);
}
/// slice(n, m) - Chop off the first N elements of the array, and keep M
/// elements in the array.
MutableArrayRef<T> slice(size_t N, size_t M) const {
assert(N+M <= this->size() && "Invalid specifier");
return MutableArrayRef<T>(data()+N, M);
}
/// \brief Drop the first \p N elements of the array.
MutableArrayRef<T> drop_front(size_t N = 1) const {
assert(this->size() >= N && "Dropping more elements than exist");
return slice(N, this->size() - N);
}
/// \brief Drop the last \p N elements of the array.
MutableArrayRef<T> drop_back(size_t N = 1) const {
assert(this->size() >= N && "Dropping more elements than exist");
return slice(0, this->size() - N);
}
/// @}
/// @name Operator Overloads
/// @{
T &operator[](size_t Index) const {
assert(Index < this->size() && "Invalid index!");
return data()[Index];
}
};
/// @name ArrayRef Convenience constructors
/// @{
/// Construct an ArrayRef from a single element.
template<typename T>
ArrayRef<T> makeArrayRef(const T &OneElt) {
return OneElt;
}
/// Construct an ArrayRef from a pointer and length.
template<typename T>
ArrayRef<T> makeArrayRef(const T *data, size_t length) {
return ArrayRef<T>(data, length);
}
/// Construct an ArrayRef from a range.
template<typename T>
ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
return ArrayRef<T>(begin, end);
}
/// Construct an ArrayRef from a SmallVector.
template <typename T>
ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a SmallVector.
template <typename T, unsigned N>
ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a std::vector.
template<typename T>
ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from an ArrayRef (no-op) (const)
template <typename T> ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from an ArrayRef (no-op)
template <typename T> ArrayRef<T> &makeArrayRef(ArrayRef<T> &Vec) {
return Vec;
}
/// Construct an ArrayRef from a C array.
template<typename T, size_t N>
ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
return ArrayRef<T>(Arr);
}
/// @}
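// An illustrative sketch (not part of the original header): makeArrayRef
// deduces the element type, avoiding an explicit ArrayRef<T> spelling.
// \code
//   SmallVector<unsigned, 4> Vec;
//   Vec.push_back(1);
//   Vec.push_back(2);
//   auto Xs = makeArrayRef(Vec); // ArrayRef<unsigned>
//   auto Tail = Xs.drop_front(); // refers to {2}
// \endcode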
/// @name ArrayRef Comparison Operators
/// @{
template<typename T>
inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
return LHS.equals(RHS);
}
template<typename T>
inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
return !(LHS == RHS);
}
/// @}
// ArrayRefs can be treated like a POD type.
template <typename T> struct isPodLike;
template <typename T> struct isPodLike<ArrayRef<T> > {
static const bool value = true;
};
template <typename T> hash_code hash_value(ArrayRef<T> S) {
return hash_combine_range(S.begin(), S.end());
}
} // end namespace llvm
#endif // LLVM_ADT_ARRAYREF_H

View File

@ -1,591 +0,0 @@
//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the BitVector class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <cstring>
namespace llvm {
class BitVector {
typedef unsigned long BitWord;
enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
"Unsupported word size");
BitWord *Bits; // Actual bits.
unsigned Size; // Size of bitvector in bits.
unsigned Capacity; // Number of BitWords allocated in the Bits array.
public:
typedef unsigned size_type;
// Encapsulation of a single bit.
class reference {
friend class BitVector;
BitWord *WordRef;
unsigned BitPos;
reference(); // Undefined
public:
reference(BitVector &b, unsigned Idx) {
WordRef = &b.Bits[Idx / BITWORD_SIZE];
BitPos = Idx % BITWORD_SIZE;
}
reference(const reference&) = default;
reference &operator=(reference t) {
*this = bool(t);
return *this;
}
reference& operator=(bool t) {
if (t)
*WordRef |= BitWord(1) << BitPos;
else
*WordRef &= ~(BitWord(1) << BitPos);
return *this;
}
operator bool() const {
return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
}
};
/// BitVector default ctor - Creates an empty bitvector.
BitVector() : Size(0), Capacity(0) {
Bits = nullptr;
}
/// BitVector ctor - Creates a bitvector of specified number of bits. All
/// bits are initialized to the specified value.
explicit BitVector(unsigned s, bool t = false) : Size(s) {
Capacity = NumBitWords(s);
Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
init_words(Bits, Capacity, t);
if (t)
clear_unused_bits();
}
/// BitVector copy ctor.
BitVector(const BitVector &RHS) : Size(RHS.size()) {
if (Size == 0) {
Bits = nullptr;
Capacity = 0;
return;
}
Capacity = NumBitWords(RHS.size());
Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
}
BitVector(BitVector &&RHS)
: Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
RHS.Bits = nullptr;
RHS.Size = RHS.Capacity = 0;
}
~BitVector() {
std::free(Bits);
}
/// empty - Tests whether there are no bits in this bitvector.
bool empty() const { return Size == 0; }
/// size - Returns the number of bits in this bitvector.
size_type size() const { return Size; }
/// count - Returns the number of bits which are set.
size_type count() const {
unsigned NumBits = 0;
for (unsigned i = 0; i < NumBitWords(size()); ++i)
NumBits += countPopulation(Bits[i]);
return NumBits;
}
/// any - Returns true if any bit is set.
bool any() const {
for (unsigned i = 0; i < NumBitWords(size()); ++i)
if (Bits[i] != 0)
return true;
return false;
}
/// all - Returns true if all bits are set.
bool all() const {
for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
if (Bits[i] != ~0UL)
return false;
// If bits remain, check that they are ones. The unused bits are always zero.
if (unsigned Remainder = Size % BITWORD_SIZE)
return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
return true;
}
/// none - Returns true if none of the bits are set.
bool none() const {
return !any();
}
/// find_first - Returns the index of the first set bit, -1 if none
/// of the bits are set.
int find_first() const {
for (unsigned i = 0; i < NumBitWords(size()); ++i)
if (Bits[i] != 0)
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
return -1;
}
/// find_next - Returns the index of the next set bit following the
/// "Prev" bit. Returns -1 if the next set bit is not found.
int find_next(unsigned Prev) const {
++Prev;
if (Prev >= Size)
return -1;
unsigned WordPos = Prev / BITWORD_SIZE;
unsigned BitPos = Prev % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
// Mask off previous bits.
Copy &= ~0UL << BitPos;
if (Copy != 0)
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
// Check subsequent words.
for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i)
if (Bits[i] != 0)
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
return -1;
}
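// An illustrative sketch (not part of the original header): the usual idiom
// for visiting every set bit, given some BitVector BV in scope.
// \code
//   for (int Idx = BV.find_first(); Idx != -1; Idx = BV.find_next(Idx)) {
//     // ... use bit index Idx ...
//   }
// \endcode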
/// clear - Clear all bits.
void clear() {
Size = 0;
}
/// resize - Grow or shrink the bitvector.
void resize(unsigned N, bool t = false) {
if (N > Capacity * BITWORD_SIZE) {
unsigned OldCapacity = Capacity;
grow(N);
init_words(&Bits[OldCapacity], (Capacity-OldCapacity), t);
}
// Set any old unused bits that are now included in the BitVector. This
// may set bits that are not included in the new vector, but we will clear
// them back out below.
if (N > Size)
set_unused_bits(t);
// Update the size, and clear out any bits that are now unused
unsigned OldSize = Size;
Size = N;
if (t || N < OldSize)
clear_unused_bits();
}
void reserve(unsigned N) {
if (N > Capacity * BITWORD_SIZE)
grow(N);
}
// Set, reset, flip
BitVector &set() {
init_words(Bits, Capacity, true);
clear_unused_bits();
return *this;
}
BitVector &set(unsigned Idx) {
assert(Bits && "Bits never allocated");
Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
return *this;
}
/// set - Efficiently set a range of bits in [I, E)
BitVector &set(unsigned I, unsigned E) {
assert(I <= E && "Attempted to set backwards range!");
assert(E <= size() && "Attempted to set out-of-bounds range!");
if (I == E) return *this;
if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
BitWord EMask = 1UL << (E % BITWORD_SIZE);
BitWord IMask = 1UL << (I % BITWORD_SIZE);
BitWord Mask = EMask - IMask;
Bits[I / BITWORD_SIZE] |= Mask;
return *this;
}
BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
Bits[I / BITWORD_SIZE] |= PrefixMask;
I = alignTo(I, BITWORD_SIZE);
for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
Bits[I / BITWORD_SIZE] = ~0UL;
BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
if (I < E)
Bits[I / BITWORD_SIZE] |= PostfixMask;
return *this;
}
BitVector &reset() {
init_words(Bits, Capacity, false);
return *this;
}
BitVector &reset(unsigned Idx) {
Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
return *this;
}
/// reset - Efficiently reset a range of bits in [I, E)
BitVector &reset(unsigned I, unsigned E) {
assert(I <= E && "Attempted to reset backwards range!");
assert(E <= size() && "Attempted to reset out-of-bounds range!");
if (I == E) return *this;
if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
BitWord EMask = 1UL << (E % BITWORD_SIZE);
BitWord IMask = 1UL << (I % BITWORD_SIZE);
BitWord Mask = EMask - IMask;
Bits[I / BITWORD_SIZE] &= ~Mask;
return *this;
}
BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
Bits[I / BITWORD_SIZE] &= ~PrefixMask;
I = alignTo(I, BITWORD_SIZE);
for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
Bits[I / BITWORD_SIZE] = 0UL;
BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
if (I < E)
Bits[I / BITWORD_SIZE] &= ~PostfixMask;
return *this;
}
BitVector &flip() {
for (unsigned i = 0; i < NumBitWords(size()); ++i)
Bits[i] = ~Bits[i];
clear_unused_bits();
return *this;
}
BitVector &flip(unsigned Idx) {
Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
return *this;
}
// Indexing.
reference operator[](unsigned Idx) {
assert (Idx < Size && "Out-of-bounds Bit access.");
return reference(*this, Idx);
}
bool operator[](unsigned Idx) const {
assert (Idx < Size && "Out-of-bounds Bit access.");
BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
}
bool test(unsigned Idx) const {
return (*this)[Idx];
}
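// An illustrative sketch (not part of the original header) of the indexing
// and mutation interface, including assignment through the reference proxy.
// \code
//   BitVector BV(8);     // eight bits, all false
//   BV.set(1);           // bit 1 -> true
//   BV[3] = true;        // assignment via the reference proxy
//   bool T = BV.test(3); // true
//   BV.flip();           // invert all eight bits
// \endcode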
/// Test if any common bits are set.
bool anyCommon(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
if (Bits[i] & RHS.Bits[i])
return true;
return false;
}
// Comparison operators.
bool operator==(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
if (Bits[i] != RHS.Bits[i])
return false;
// Verify that any extra words are all zeros.
if (i != ThisWords) {
for (; i != ThisWords; ++i)
if (Bits[i])
return false;
} else if (i != RHSWords) {
for (; i != RHSWords; ++i)
if (RHS.Bits[i])
return false;
}
return true;
}
bool operator!=(const BitVector &RHS) const {
return !(*this == RHS);
}
/// Intersection, union, disjoint union.
BitVector &operator&=(const BitVector &RHS) {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
Bits[i] &= RHS.Bits[i];
// Any bits that are just in this bitvector become zero, because they aren't
// in the RHS bit vector. Any words only in RHS are ignored because they
// are already zero in the LHS.
for (; i != ThisWords; ++i)
Bits[i] = 0;
return *this;
}
/// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
BitVector &reset(const BitVector &RHS) {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
Bits[i] &= ~RHS.Bits[i];
return *this;
}
/// test - Check if (This - RHS) is zero.
/// This is the same as reset(RHS) and any().
bool test(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
unsigned i;
for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
if ((Bits[i] & ~RHS.Bits[i]) != 0)
return true;
for (; i != ThisWords ; ++i)
if (Bits[i] != 0)
return true;
return false;
}
BitVector &operator|=(const BitVector &RHS) {
if (size() < RHS.size())
resize(RHS.size());
for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
Bits[i] |= RHS.Bits[i];
return *this;
}
BitVector &operator^=(const BitVector &RHS) {
if (size() < RHS.size())
resize(RHS.size());
for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
Bits[i] ^= RHS.Bits[i];
return *this;
}
// Assignment operator.
const BitVector &operator=(const BitVector &RHS) {
if (this == &RHS) return *this;
Size = RHS.size();
unsigned RHSWords = NumBitWords(Size);
if (Size <= Capacity * BITWORD_SIZE) {
if (Size)
std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord));
clear_unused_bits();
return *this;
}
// Grow the bitvector to have enough elements.
Capacity = RHSWords;
assert(Capacity > 0 && "negative capacity?");
BitWord *NewBits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord));
// Destroy the old bits.
std::free(Bits);
Bits = NewBits;
return *this;
}
const BitVector &operator=(BitVector &&RHS) {
if (this == &RHS) return *this;
std::free(Bits);
Bits = RHS.Bits;
Size = RHS.Size;
Capacity = RHS.Capacity;
RHS.Bits = nullptr;
RHS.Size = RHS.Capacity = 0;
return *this;
}
void swap(BitVector &RHS) {
std::swap(Bits, RHS.Bits);
std::swap(Size, RHS.Size);
std::swap(Capacity, RHS.Capacity);
}
//===--------------------------------------------------------------------===//
// Portable bit mask operations.
//===--------------------------------------------------------------------===//
//
// These methods all operate on arrays of uint32_t, each holding 32 bits. The
// fixed word size makes it easier to work with literal bit vector constants
// in portable code.
//
// The LSB in each word is the lowest numbered bit. The size of a portable
// bit mask is always a whole multiple of 32 bits. If no bit mask size is
// given, the bit mask is assumed to cover the entire BitVector.
/// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
/// This computes "*this |= Mask".
void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<true, false>(Mask, MaskWords);
}
/// clearBitsInMask - Clear any bits in this vector that are set in Mask.
/// Don't resize. This computes "*this &= ~Mask".
void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<false, false>(Mask, MaskWords);
}
/// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
/// Don't resize. This computes "*this |= ~Mask".
void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<true, true>(Mask, MaskWords);
}
/// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
/// Don't resize. This computes "*this &= Mask".
void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
applyMask<false, true>(Mask, MaskWords);
}
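// An illustrative sketch (not part of the original header): masks are always
// arrays of uint32_t, LSB first, independent of the native BitWord size.
// \code
//   BitVector BV(64);
//   const uint32_t Mask[2] = {0x0000000F, 0x80000000};
//   BV.setBitsInMask(Mask, 2); // sets bits 0..3 and bit 63
// \endcode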
private:
unsigned NumBitWords(unsigned S) const {
return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
}
// Set the unused bits in the high words.
void set_unused_bits(bool t = true) {
// Set high words first.
unsigned UsedWords = NumBitWords(Size);
if (Capacity > UsedWords)
init_words(&Bits[UsedWords], (Capacity-UsedWords), t);
// Then set any stray high bits of the last used word.
unsigned ExtraBits = Size % BITWORD_SIZE;
if (ExtraBits) {
BitWord ExtraBitMask = ~0UL << ExtraBits;
if (t)
Bits[UsedWords-1] |= ExtraBitMask;
else
Bits[UsedWords-1] &= ~ExtraBitMask;
}
}
// Clear the unused bits in the high words.
void clear_unused_bits() {
set_unused_bits(false);
}
void grow(unsigned NewSize) {
Capacity = std::max(NumBitWords(NewSize), Capacity * 2);
assert(Capacity > 0 && "realloc-ing zero space");
Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord));
clear_unused_bits();
}
void init_words(BitWord *B, unsigned NumWords, bool t) {
memset(B, 0 - (int)t, NumWords*sizeof(BitWord));
}
template<bool AddBits, bool InvertMask>
void applyMask(const uint32_t *Mask, unsigned MaskWords) {
static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
MaskWords = std::min(MaskWords, (size() + 31) / 32);
const unsigned Scale = BITWORD_SIZE / 32;
unsigned i;
for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
BitWord BW = Bits[i];
// This inner loop should unroll completely when BITWORD_SIZE > 32.
for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
uint32_t M = *Mask++;
if (InvertMask) M = ~M;
if (AddBits) BW |= BitWord(M) << b;
else BW &= ~(BitWord(M) << b);
}
Bits[i] = BW;
}
for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
uint32_t M = *Mask++;
if (InvertMask) M = ~M;
if (AddBits) Bits[i] |= BitWord(M) << b;
else Bits[i] &= ~(BitWord(M) << b);
}
if (AddBits)
clear_unused_bits();
}
public:
/// Return the size (in bytes) of the bit vector.
size_t getMemorySize() const { return Capacity * sizeof(BitWord); }
};
static inline size_t capacity_in_bytes(const BitVector &X) {
return X.getMemorySize();
}
} // end namespace llvm
namespace std {
/// Implement std::swap in terms of BitVector swap.
inline void
swap(llvm::BitVector &LHS, llvm::BitVector &RHS) {
LHS.swap(RHS);
}
} // end namespace std
#endif // LLVM_ADT_BITVECTOR_H

View File

@ -1,153 +0,0 @@
//===-- llvm/ADT/BitmaskEnum.h ----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_BITMASKENUM_H
#define LLVM_ADT_BITMASKENUM_H
#include <cassert>
#include <type_traits>
#include <utility>
#include "llvm/Support/MathExtras.h"
/// LLVM_MARK_AS_BITMASK_ENUM lets you opt in an individual enum type so you can
/// perform bitwise operations on it without putting static_cast everywhere.
///
/// \code
/// enum MyEnum {
/// E1 = 1, E2 = 2, E3 = 4, E4 = 8,
/// LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E4)
/// };
///
/// void Foo() {
/// MyEnum A = (E1 | E2) & E3 ^ ~E4; // Look, ma: No static_cast!
/// }
/// \endcode
///
/// Normally when you do a bitwise operation on an enum value, you get back an
/// instance of the underlying type (e.g. int). But using this macro, bitwise
/// ops on your enum will return you back instances of the enum. This is
/// particularly useful for enums which represent a combination of flags.
///
/// The parameter to LLVM_MARK_AS_BITMASK_ENUM should be the largest individual
/// value in your enum.
///
/// All of the enum's values must be non-negative.
#define LLVM_MARK_AS_BITMASK_ENUM(LargestValue) \
LLVM_BITMASK_LARGEST_ENUMERATOR = LargestValue
/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() pulls the operator overloads used
/// by LLVM_MARK_AS_BITMASK_ENUM into the current namespace.
///
/// Suppose you have an enum foo::bar::MyEnum. Before using
/// LLVM_MARK_AS_BITMASK_ENUM on MyEnum, you must put
/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() somewhere inside namespace foo or
/// namespace foo::bar. This allows the relevant operator overloads to be found
/// by ADL.
///
/// You don't need to use this macro in namespace llvm; it's done at the bottom
/// of this file.
#define LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() \
using ::llvm::BitmaskEnumDetail::operator~; \
using ::llvm::BitmaskEnumDetail::operator|; \
using ::llvm::BitmaskEnumDetail::operator&; \
using ::llvm::BitmaskEnumDetail::operator^; \
using ::llvm::BitmaskEnumDetail::operator|=; \
using ::llvm::BitmaskEnumDetail::operator&=; \
/* Force a semicolon at the end of this macro. */ \
using ::llvm::BitmaskEnumDetail::operator^=
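// An illustrative sketch (not part of the original header): enabling the
// bitmask operators for an enum in a hypothetical user namespace mylib.
// \code
//   namespace mylib {
//   enum Flags {
//     Read = 1, Write = 2, Exec = 4,
//     LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Exec)
//   };
//   LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
//   inline bool canWrite(Flags F) { return (F & Write) == Write; }
//   } // namespace mylib
// \endcode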
namespace llvm {
/// Traits class to determine whether an enum has a
/// LLVM_BITMASK_LARGEST_ENUMERATOR enumerator.
template <typename E, typename Enable = void>
struct is_bitmask_enum : std::false_type {};
template <typename E>
struct is_bitmask_enum<
E, typename std::enable_if<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >=
0>::type> : std::true_type {};
namespace BitmaskEnumDetail {
/// Get a bitmask with 1s in all places up to the high-order bit of E's largest
/// value.
template <typename E> typename std::underlying_type<E>::type Mask() {
// On overflow, NextPowerOf2 returns zero with the type uint64_t, so
// subtracting 1 gives us the mask with all bits set, like we want.
return NextPowerOf2(static_cast<typename std::underlying_type<E>::type>(
E::LLVM_BITMASK_LARGEST_ENUMERATOR)) -
1;
}
/// Check that Val is in range for E, and return Val cast to E's underlying
/// type.
template <typename E> typename std::underlying_type<E>::type Underlying(E Val) {
auto U = static_cast<typename std::underlying_type<E>::type>(Val);
assert(U >= 0 && "Negative enum values are not allowed.");
assert(U <= Mask<E>() && "Enum value too large (or largest val too small?)");
return U;
}
template <typename E,
typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
E operator~(E Val) {
return static_cast<E>(~Underlying(Val) & Mask<E>());
}
template <typename E,
typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
E operator|(E LHS, E RHS) {
return static_cast<E>(Underlying(LHS) | Underlying(RHS));
}
template <typename E,
typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
E operator&(E LHS, E RHS) {
return static_cast<E>(Underlying(LHS) & Underlying(RHS));
}
template <typename E,
typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
E operator^(E LHS, E RHS) {
return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
}
// |=, &=, and ^= return a reference to LHS, to match the behavior of the
// operators on builtin types.
template <typename E,
typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
E &operator|=(E &LHS, E RHS) {
LHS = LHS | RHS;
return LHS;
}
template <typename E,
typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
E &operator&=(E &LHS, E RHS) {
LHS = LHS & RHS;
return LHS;
}
template <typename E,
typename = typename std::enable_if<is_bitmask_enum<E>::value>::type>
E &operator^=(E &LHS, E RHS) {
LHS = LHS ^ RHS;
return LHS;
}
} // namespace BitmaskEnumDetail
// Enable bitmask enums in namespace ::llvm and all nested namespaces.
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
} // namespace llvm
#endif

View File

@ -1,77 +0,0 @@
//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
#define LLVM_ADT_DAGDELTAALGORITHM_H
#include <set>
#include <vector>
namespace llvm {
/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
/// directed acyclic graphs using a predicate function.
///
/// The result of the algorithm is a subset of the input change set which is
/// guaranteed to satisfy the predicate, assuming that the input set did. For
/// well formed predicates, the result set is guaranteed to be such that
/// removing any single element not required by the dependencies on the other
/// elements would falsify the predicate.
///
/// The DAG should be used to represent dependencies in the changes which are
/// likely to hold across the predicate function. That is, for a particular
/// changeset S and predicate P:
///
/// P(S) => P(S union pred(S))
///
/// The minimization algorithm uses this dependency information to attempt to
/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
/// is not required to satisfy this property, but the algorithm will run
/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
/// for more information on the properties which the predicate function itself
/// should satisfy.
class DAGDeltaAlgorithm {
virtual void anchor();
public:
typedef unsigned change_ty;
typedef std::pair<change_ty, change_ty> edge_ty;
// FIXME: Use a decent data structure.
typedef std::set<change_ty> changeset_ty;
typedef std::vector<changeset_ty> changesetlist_ty;
public:
virtual ~DAGDeltaAlgorithm() {}
/// Run - Minimize the DAG formed by the \p Changes vertices and the
/// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
/// changes and returning the smallest set which still satisfies the test
/// predicate and the input \p Dependencies.
///
/// \param Changes The list of changes.
///
/// \param Dependencies The list of dependencies amongst changes. For each
/// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
/// minimization algorithm guarantees that for each tested changed set S,
/// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
/// dependencies.
changeset_ty Run(const changeset_ty &Changes,
const std::vector<edge_ty> &Dependencies);
/// UpdatedSearchState - Callback used when the search state changes.
virtual void UpdatedSearchState(const changeset_ty &Changes,
const changesetlist_ty &Sets,
const changeset_ty &Required) {}
/// ExecuteOneTest - Execute a single test predicate on the change set \p S.
virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
};
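// An illustrative sketch (not part of the original header): a minimal
// concrete subclass; stillFails is a hypothetical user predicate.
// \code
//   struct MyMinimizer : DAGDeltaAlgorithm {
//     bool ExecuteOneTest(const changeset_ty &S) override {
//       return stillFails(S); // hypothetical: true iff S reproduces the bug
//     }
//   };
//   // MyMinimizer().Run(Changes, Dependencies) yields a minimal such set.
// \endcode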
} // end namespace llvm
#endif

View File

@ -1,93 +0,0 @@
//===--- DeltaAlgorithm.h - A Set Minimization Algorithm -------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_DELTAALGORITHM_H
#define LLVM_ADT_DELTAALGORITHM_H
#include <set>
#include <vector>
namespace llvm {
/// DeltaAlgorithm - Implements the delta debugging algorithm (A. Zeller '99)
/// for minimizing arbitrary sets using a predicate function.
///
/// The result of the algorithm is a subset of the input change set which is
/// guaranteed to satisfy the predicate, assuming that the input set did. For
/// well formed predicates, the result set is guaranteed to be such that
/// removing any single element would falsify the predicate.
///
/// For best results the predicate function *should* (but need not) satisfy
/// certain properties, in particular:
/// (1) The predicate should return false on an empty set and true on the full
/// set.
/// (2) If the predicate returns true for a set of changes, it should return
/// true for all supersets of that set.
///
/// It is not an error to provide a predicate that does not satisfy these
/// requirements, and the algorithm will generally produce reasonable
/// results. However, it may run substantially more tests than with a good
/// predicate.
class DeltaAlgorithm {
public:
typedef unsigned change_ty;
// FIXME: Use a decent data structure.
typedef std::set<change_ty> changeset_ty;
typedef std::vector<changeset_ty> changesetlist_ty;
private:
/// Cache of failed test results. Successful test results are never cached
/// since we always reduce following a success.
std::set<changeset_ty> FailedTestsCache;
/// GetTestResult - Get the test result for the \p Changes from the
/// cache, executing the test if necessary.
///
/// \param Changes - The change set to test.
/// \return - The test result.
bool GetTestResult(const changeset_ty &Changes);
/// Split - Partition a set of changes \p S into one or two subsets.
void Split(const changeset_ty &S, changesetlist_ty &Res);
/// Delta - Minimize a set of \p Changes which has been partitioned into
/// smaller sets, by attempting to remove individual subsets.
changeset_ty Delta(const changeset_ty &Changes,
const changesetlist_ty &Sets);
/// Search - Search for a subset (or subsets) in \p Sets which can be
/// removed from \p Changes while still satisfying the predicate.
///
/// \param Res - On success, a subset of Changes which satisfies the
/// predicate.
/// \return - True on success.
bool Search(const changeset_ty &Changes, const changesetlist_ty &Sets,
changeset_ty &Res);
protected:
/// UpdatedSearchState - Callback used when the search state changes.
virtual void UpdatedSearchState(const changeset_ty &Changes,
const changesetlist_ty &Sets) {}
/// ExecuteOneTest - Execute a single test predicate on the change set \p S.
virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
DeltaAlgorithm& operator=(const DeltaAlgorithm&) = default;
public:
virtual ~DeltaAlgorithm();
/// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on
/// subsets of changes and returning the smallest set which still satisfies
/// the test predicate.
changeset_ty Run(const changeset_ty &Changes);
};
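// An illustrative sketch (not part of the original header), with
// reproducesCrash standing in for a hypothetical test predicate.
// \code
//   struct CrashMinimizer : DeltaAlgorithm {
//     bool ExecuteOneTest(const changeset_ty &S) override {
//       return reproducesCrash(S); // hypothetical predicate
//     }
//   };
//   // CrashMinimizer().Run({0, 1, 2, 3}) returns a minimal failing subset.
// \endcode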
} // end namespace llvm
#endif

File diff suppressed because it is too large

View File

@ -1,251 +0,0 @@
//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines DenseMapInfo traits for DenseMap.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_DENSEMAPINFO_H
#define LLVM_ADT_DENSEMAPINFO_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
namespace llvm {
template<typename T>
struct DenseMapInfo {
//static inline T getEmptyKey();
//static inline T getTombstoneKey();
//static unsigned getHashValue(const T &Val);
//static bool isEqual(const T &LHS, const T &RHS);
};
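// An illustrative sketch (not part of the original header): a specialization
// for a hypothetical 2-D Coord key, following the commented contract above.
// The empty and tombstone keys must be distinct values never stored in a map.
// \code
//   struct Coord { int X, Y; };
//   template <> struct DenseMapInfo<Coord> {
//     static inline Coord getEmptyKey() { return Coord{INT_MAX, INT_MAX}; }
//     static inline Coord getTombstoneKey() { return Coord{INT_MIN, INT_MIN}; }
//     static unsigned getHashValue(const Coord &C) {
//       return (unsigned)hash_combine(C.X, C.Y);
//     }
//     static bool isEqual(const Coord &L, const Coord &R) {
//       return L.X == R.X && L.Y == R.Y;
//     }
//   };
// \endcode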
template <typename T> struct CachedHash {
CachedHash(T Val) : Val(std::move(Val)) {
Hash = DenseMapInfo<T>::getHashValue(Val);
}
CachedHash(T Val, unsigned Hash) : Val(std::move(Val)), Hash(Hash) {}
T Val;
unsigned Hash;
};
// Provide DenseMapInfo for all CachedHash<T>.
template <typename T> struct DenseMapInfo<CachedHash<T>> {
static CachedHash<T> getEmptyKey() {
T N = DenseMapInfo<T>::getEmptyKey();
return {N, 0};
}
static CachedHash<T> getTombstoneKey() {
T N = DenseMapInfo<T>::getTombstoneKey();
return {N, 0};
}
static unsigned getHashValue(CachedHash<T> Val) {
assert(!isEqual(Val, getEmptyKey()) && "Cannot hash the empty key!");
assert(!isEqual(Val, getTombstoneKey()) &&
"Cannot hash the tombstone key!");
return Val.Hash;
}
static bool isEqual(CachedHash<T> A, CachedHash<T> B) {
return DenseMapInfo<T>::isEqual(A.Val, B.Val);
}
};
// Provide DenseMapInfo for all pointers.
template<typename T>
struct DenseMapInfo<T*> {
static inline T* getEmptyKey() {
uintptr_t Val = static_cast<uintptr_t>(-1);
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
static inline T* getTombstoneKey() {
uintptr_t Val = static_cast<uintptr_t>(-2);
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
static unsigned getHashValue(const T *PtrVal) {
return (unsigned((uintptr_t)PtrVal) >> 4) ^
(unsigned((uintptr_t)PtrVal) >> 9);
}
static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
};
// Provide DenseMapInfo for chars.
template<> struct DenseMapInfo<char> {
static inline char getEmptyKey() { return ~0; }
static inline char getTombstoneKey() { return ~0 - 1; }
static unsigned getHashValue(const char& Val) { return Val * 37U; }
static bool isEqual(const char &LHS, const char &RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for unsigned ints.
template<> struct DenseMapInfo<unsigned> {
static inline unsigned getEmptyKey() { return ~0U; }
static inline unsigned getTombstoneKey() { return ~0U - 1; }
static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for unsigned longs.
template<> struct DenseMapInfo<unsigned long> {
static inline unsigned long getEmptyKey() { return ~0UL; }
static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
static unsigned getHashValue(const unsigned long& Val) {
return (unsigned)(Val * 37UL);
}
static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for unsigned long longs.
template<> struct DenseMapInfo<unsigned long long> {
static inline unsigned long long getEmptyKey() { return ~0ULL; }
static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
static unsigned getHashValue(const unsigned long long& Val) {
return (unsigned)(Val * 37ULL);
}
static bool isEqual(const unsigned long long& LHS,
const unsigned long long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for ints.
template<> struct DenseMapInfo<int> {
static inline int getEmptyKey() { return 0x7fffffff; }
static inline int getTombstoneKey() { return -0x7fffffff - 1; }
static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
static bool isEqual(const int& LHS, const int& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for longs.
template<> struct DenseMapInfo<long> {
static inline long getEmptyKey() {
return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
}
static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
static unsigned getHashValue(const long& Val) {
return (unsigned)(Val * 37UL);
}
static bool isEqual(const long& LHS, const long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for long longs.
template<> struct DenseMapInfo<long long> {
static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
static unsigned getHashValue(const long long& Val) {
return (unsigned)(Val * 37ULL);
}
static bool isEqual(const long long& LHS,
const long long& RHS) {
return LHS == RHS;
}
};
// Provide DenseMapInfo for all pairs whose members have info.
template<typename T, typename U>
struct DenseMapInfo<std::pair<T, U> > {
typedef std::pair<T, U> Pair;
typedef DenseMapInfo<T> FirstInfo;
typedef DenseMapInfo<U> SecondInfo;
static inline Pair getEmptyKey() {
return std::make_pair(FirstInfo::getEmptyKey(),
SecondInfo::getEmptyKey());
}
static inline Pair getTombstoneKey() {
return std::make_pair(FirstInfo::getTombstoneKey(),
SecondInfo::getTombstoneKey());
}
static unsigned getHashValue(const Pair& PairVal) {
uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
| (uint64_t)SecondInfo::getHashValue(PairVal.second);
key += ~(key << 32);
key ^= (key >> 22);
key += ~(key << 13);
key ^= (key >> 8);
key += (key << 3);
key ^= (key >> 15);
key += ~(key << 27);
key ^= (key >> 31);
return (unsigned)key;
}
static bool isEqual(const Pair &LHS, const Pair &RHS) {
return FirstInfo::isEqual(LHS.first, RHS.first) &&
SecondInfo::isEqual(LHS.second, RHS.second);
}
};
// Provide DenseMapInfo for StringRefs.
template <> struct DenseMapInfo<StringRef> {
static inline StringRef getEmptyKey() {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)),
0);
}
static inline StringRef getTombstoneKey() {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)),
0);
}
static unsigned getHashValue(StringRef Val) {
assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
assert(Val.data() != getTombstoneKey().data() &&
"Cannot hash the tombstone key!");
return (unsigned)(hash_value(Val));
}
static bool isEqual(StringRef LHS, StringRef RHS) {
if (RHS.data() == getEmptyKey().data())
return LHS.data() == getEmptyKey().data();
if (RHS.data() == getTombstoneKey().data())
return LHS.data() == getTombstoneKey().data();
return LHS == RHS;
}
};
// Provide DenseMapInfo for ArrayRefs.
template <typename T> struct DenseMapInfo<ArrayRef<T>> {
static inline ArrayRef<T> getEmptyKey() {
return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)),
size_t(0));
}
static inline ArrayRef<T> getTombstoneKey() {
return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)),
size_t(0));
}
static unsigned getHashValue(ArrayRef<T> Val) {
assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
assert(Val.data() != getTombstoneKey().data() &&
"Cannot hash the tombstone key!");
return (unsigned)(hash_value(Val));
}
static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
if (RHS.data() == getEmptyKey().data())
return LHS.data() == getEmptyKey().data();
if (RHS.data() == getTombstoneKey().data())
return LHS.data() == getTombstoneKey().data();
return LHS == RHS;
}
};
} // end namespace llvm
#endif
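
// Editor's example (a minimal sketch, not part of the original header): any
// type with a DenseMapInfo specialization can key a DenseMap. The std::pair
// specialization above combines the two members' hashes, so pair keys work
// out of the box. Function name is hypothetical; assumes the LLVM ADT headers
// are on the include path.
//
// #include "llvm/ADT/DenseMap.h"
// #include <utility>
//
// static const char *pairKeyDemo() {
//   llvm::DenseMap<std::pair<int, unsigned>, const char *> Names;
//   Names[std::make_pair(1, 2u)] = "one-two";        // hashed via the pair info
//   auto It = Names.find(std::make_pair(1, 2u));
//   return It == Names.end() ? nullptr : It->second; // yields "one-two"
// }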


@ -1,180 +0,0 @@
//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseSet class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_DENSESET_H
#define LLVM_ADT_DENSESET_H
#include "llvm/ADT/DenseMap.h"
namespace llvm {
namespace detail {
struct DenseSetEmpty {};
// Use the empty base class trick so we can create a DenseMap where the buckets
// contain only a single item.
template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
KeyT key;
public:
KeyT &getFirst() { return key; }
const KeyT &getFirst() const { return key; }
DenseSetEmpty &getSecond() { return *this; }
const DenseSetEmpty &getSecond() const { return *this; }
};
}
/// DenseSet - This implements a dense probed hash-table based set.
template<typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT> >
class DenseSet {
typedef DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
detail::DenseSetPair<ValueT>> MapTy;
static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
"DenseMap buckets unexpectedly large!");
MapTy TheMap;
public:
typedef ValueT key_type;
typedef ValueT value_type;
typedef unsigned size_type;
explicit DenseSet(unsigned NumInitBuckets = 0) : TheMap(NumInitBuckets) {}
bool empty() const { return TheMap.empty(); }
size_type size() const { return TheMap.size(); }
size_t getMemorySize() const { return TheMap.getMemorySize(); }
/// Grow the DenseSet so that it has at least Size buckets. Will not shrink
/// the size of the set.
void resize(size_t Size) { TheMap.resize(Size); }
void clear() {
TheMap.clear();
}
/// Return 1 if the specified key is in the set, 0 otherwise.
size_type count(const ValueT &V) const {
return TheMap.count(V);
}
bool erase(const ValueT &V) {
return TheMap.erase(V);
}
void swap(DenseSet& RHS) {
TheMap.swap(RHS.TheMap);
}
// Iterators.
class Iterator {
typename MapTy::iterator I;
friend class DenseSet;
public:
typedef typename MapTy::iterator::difference_type difference_type;
typedef ValueT value_type;
typedef value_type *pointer;
typedef value_type &reference;
typedef std::forward_iterator_tag iterator_category;
Iterator(const typename MapTy::iterator &i) : I(i) {}
ValueT &operator*() { return I->getFirst(); }
ValueT *operator->() { return &I->getFirst(); }
Iterator& operator++() { ++I; return *this; }
Iterator operator++(int) { auto T = *this; ++I; return T; }
bool operator==(const Iterator& X) const { return I == X.I; }
bool operator!=(const Iterator& X) const { return I != X.I; }
};
class ConstIterator {
typename MapTy::const_iterator I;
friend class DenseSet;
public:
typedef typename MapTy::const_iterator::difference_type difference_type;
typedef ValueT value_type;
typedef value_type *pointer;
typedef value_type &reference;
typedef std::forward_iterator_tag iterator_category;
ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
const ValueT &operator*() { return I->getFirst(); }
const ValueT *operator->() { return &I->getFirst(); }
ConstIterator& operator++() { ++I; return *this; }
ConstIterator operator++(int) { auto T = *this; ++I; return T; }
bool operator==(const ConstIterator& X) const { return I == X.I; }
bool operator!=(const ConstIterator& X) const { return I != X.I; }
};
typedef Iterator iterator;
typedef ConstIterator const_iterator;
iterator begin() { return Iterator(TheMap.begin()); }
iterator end() { return Iterator(TheMap.end()); }
const_iterator begin() const { return ConstIterator(TheMap.begin()); }
const_iterator end() const { return ConstIterator(TheMap.end()); }
iterator find(const ValueT &V) { return Iterator(TheMap.find(V)); }
/// Alternative version of find() which allows a different, and possibly less
/// expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
/// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
/// used.
template <class LookupKeyT>
iterator find_as(const LookupKeyT &Val) {
return Iterator(TheMap.find_as(Val));
}
template <class LookupKeyT>
const_iterator find_as(const LookupKeyT &Val) const {
return ConstIterator(TheMap.find_as(Val));
}
void erase(Iterator I) { return TheMap.erase(I.I); }
void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
std::pair<iterator, bool> insert(const ValueT &V) {
detail::DenseSetEmpty Empty;
return TheMap.insert(std::make_pair(V, Empty));
}
/// Alternative version of insert that uses a different (and possibly less
/// expensive) key type.
template <typename LookupKeyT>
std::pair<iterator, bool> insert_as(const ValueT &V,
const LookupKeyT &LookupKey) {
return insert_as(ValueT(V), LookupKey);
}
template <typename LookupKeyT>
std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
detail::DenseSetEmpty Empty;
return TheMap.insert_as(std::make_pair(std::move(V), Empty), LookupKey);
}
// Range insertion of values.
template<typename InputIt>
void insert(InputIt I, InputIt E) {
for (; I != E; ++I)
insert(*I);
}
};
} // end namespace llvm
#endif
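
// Editor's example (a minimal sketch, not part of the original header): basic
// DenseSet usage. insert() returns a std::pair<iterator, bool> like std::set,
// and count() answers membership queries. Function name is hypothetical.
//
// #include "llvm/ADT/DenseSet.h"
//
// static bool denseSetDemo() {
//   llvm::DenseSet<int> S;
//   bool FirstInsert = S.insert(3).second;  // true: newly inserted
//   bool SecondInsert = S.insert(3).second; // false: already present
//   S.insert(7);
//   return FirstInsert && !SecondInsert && S.count(3) == 1 && S.size() == 2;
// }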


@ -1,291 +0,0 @@
//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file builds on the ADT/GraphTraits.h file to build generic depth
// first graph iterator. This file exposes the following functions/types:
//
// df_begin/df_end/df_iterator
// * Normal depth-first iteration - visit a node and then all of its children.
//
// idf_begin/idf_end/idf_iterator
// * Depth-first iteration on the 'inverse' graph.
//
// df_ext_begin/df_ext_end/df_ext_iterator
// * Normal depth-first iteration - visit a node and then all of its children.
// This iterator stores the 'visited' set in an external set, which allows
// it to be more efficient, and allows external clients to use the set for
// other purposes.
//
// idf_ext_begin/idf_ext_end/idf_ext_iterator
// * Depth-first iteration on the 'inverse' graph.
// This iterator stores the 'visited' set in an external set, which allows
// it to be more efficient, and allows external clients to use the set for
// other purposes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H
#define LLVM_ADT_DEPTHFIRSTITERATOR_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
#include <set>
#include <vector>
namespace llvm {
// df_iterator_storage - A private class which is used to figure out where to
// store the visited set.
template<class SetType, bool External> // Non-external set
class df_iterator_storage {
public:
SetType Visited;
};
template<class SetType>
class df_iterator_storage<SetType, true> {
public:
df_iterator_storage(SetType &VSet) : Visited(VSet) {}
df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {}
SetType &Visited;
};
// Generic Depth First Iterator
template<class GraphT,
class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
bool ExtStorage = false, class GT = GraphTraits<GraphT> >
class df_iterator : public std::iterator<std::forward_iterator_tag,
typename GT::NodeType, ptrdiff_t>,
public df_iterator_storage<SetType, ExtStorage> {
typedef std::iterator<std::forward_iterator_tag,
typename GT::NodeType, ptrdiff_t> super;
typedef typename GT::NodeType NodeType;
typedef typename GT::ChildIteratorType ChildItTy;
typedef PointerIntPair<NodeType*, 1> PointerIntTy;
  // VisitStack - Used to maintain the ordering.  Top = current block.
  // The first element is the node pointer, the second is the 'next child' to
  // visit.  If the int in PointerIntTy is 0, the 'next child' to visit is
  // not yet valid.
std::vector<std::pair<PointerIntTy, ChildItTy>> VisitStack;
private:
inline df_iterator(NodeType *Node) {
this->Visited.insert(Node);
VisitStack.push_back(
std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node)));
}
inline df_iterator() {
// End is when stack is empty
}
inline df_iterator(NodeType *Node, SetType &S)
: df_iterator_storage<SetType, ExtStorage>(S) {
if (!S.count(Node)) {
VisitStack.push_back(
std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node)));
this->Visited.insert(Node);
}
}
inline df_iterator(SetType &S)
: df_iterator_storage<SetType, ExtStorage>(S) {
// End is when stack is empty
}
inline void toNext() {
do {
std::pair<PointerIntTy, ChildItTy> &Top = VisitStack.back();
NodeType *Node = Top.first.getPointer();
ChildItTy &It = Top.second;
if (!Top.first.getInt()) {
// now retrieve the real begin of the children before we dive in
It = GT::child_begin(Node);
Top.first.setInt(1);
}
while (It != GT::child_end(Node)) {
NodeType *Next = *It++;
// Has our next sibling been visited?
if (Next && this->Visited.insert(Next).second) {
// No, do it now.
VisitStack.push_back(
std::make_pair(PointerIntTy(Next, 0), GT::child_begin(Next)));
return;
}
}
// Oops, ran out of successors... go up a level on the stack.
VisitStack.pop_back();
} while (!VisitStack.empty());
}
public:
typedef typename super::pointer pointer;
// Provide static begin and end methods as our public "constructors"
static df_iterator begin(const GraphT &G) {
return df_iterator(GT::getEntryNode(G));
}
static df_iterator end(const GraphT &G) { return df_iterator(); }
// Static begin and end methods as our public ctors for external iterators
static df_iterator begin(const GraphT &G, SetType &S) {
return df_iterator(GT::getEntryNode(G), S);
}
static df_iterator end(const GraphT &G, SetType &S) { return df_iterator(S); }
bool operator==(const df_iterator &x) const {
return VisitStack == x.VisitStack;
}
bool operator!=(const df_iterator &x) const { return !(*this == x); }
pointer operator*() const { return VisitStack.back().first.getPointer(); }
// This is a nonstandard operator-> that dereferences the pointer an extra
// time... so that you can actually call methods ON the Node, because
  // the contained type is a pointer.  This allows, e.g., BBIt->getTerminator().
//
NodeType *operator->() const { return **this; }
df_iterator &operator++() { // Preincrement
toNext();
return *this;
}
/// \brief Skips all children of the current node and traverses to next node
///
/// Note: This function takes care of incrementing the iterator. If you
/// always increment and call this function, you risk walking off the end.
df_iterator &skipChildren() {
VisitStack.pop_back();
if (!VisitStack.empty())
toNext();
return *this;
}
df_iterator operator++(int) { // Postincrement
df_iterator tmp = *this;
++*this;
return tmp;
}
// nodeVisited - return true if this iterator has already visited the
// specified node. This is public, and will probably be used to iterate over
  // nodes that a depth first iteration did not find: i.e., unreachable nodes.
//
bool nodeVisited(NodeType *Node) const {
return this->Visited.count(Node) != 0;
}
/// getPathLength - Return the length of the path from the entry node to the
/// current node, counting both nodes.
unsigned getPathLength() const { return VisitStack.size(); }
/// getPath - Return the n'th node in the path from the entry node to the
/// current node.
NodeType *getPath(unsigned n) const {
return VisitStack[n].first.getPointer();
}
};
// Provide global constructors that automatically figure out correct types...
//
template <class T>
df_iterator<T> df_begin(const T& G) {
return df_iterator<T>::begin(G);
}
template <class T>
df_iterator<T> df_end(const T& G) {
return df_iterator<T>::end(G);
}
// Provide an accessor method to use them in range-based patterns.
template <class T>
iterator_range<df_iterator<T>> depth_first(const T& G) {
return make_range(df_begin(G), df_end(G));
}
// Provide global definitions of external depth first iterators...
template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
struct df_ext_iterator : public df_iterator<T, SetTy, true> {
df_ext_iterator(const df_iterator<T, SetTy, true> &V)
: df_iterator<T, SetTy, true>(V) {}
};
template <class T, class SetTy>
df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) {
return df_ext_iterator<T, SetTy>::begin(G, S);
}
template <class T, class SetTy>
df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) {
return df_ext_iterator<T, SetTy>::end(G, S);
}
template <class T, class SetTy>
iterator_range<df_ext_iterator<T, SetTy>> depth_first_ext(const T& G,
SetTy &S) {
return make_range(df_ext_begin(G, S), df_ext_end(G, S));
}
// Provide global definitions of inverse depth first iterators...
template <class T,
class SetTy = llvm::SmallPtrSet<typename GraphTraits<T>::NodeType*, 8>,
bool External = false>
struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
: df_iterator<Inverse<T>, SetTy, External>(V) {}
};
template <class T>
idf_iterator<T> idf_begin(const T& G) {
return idf_iterator<T>::begin(Inverse<T>(G));
}
template <class T>
idf_iterator<T> idf_end(const T& G){
return idf_iterator<T>::end(Inverse<T>(G));
}
// Provide an accessor method to use them in range-based patterns.
template <class T>
iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
return make_range(idf_begin(G), idf_end(G));
}
// Provide global definitions of external inverse depth first iterators...
template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
: idf_iterator<T, SetTy, true>(V) {}
idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
: idf_iterator<T, SetTy, true>(V) {}
};
template <class T, class SetTy>
idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) {
return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S);
}
template <class T, class SetTy>
idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) {
return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S);
}
template <class T, class SetTy>
iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
SetTy &S) {
return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
}
} // End llvm namespace
#endif
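
// Editor's example (a minimal sketch, not part of the original header): LLVM
// ships GraphTraits specializations for the IR control-flow graph in
// llvm/IR/CFG.h, so a function's blocks can be walked in depth-first order
// directly. Function name is hypothetical; assumes the LLVM IR headers are
// available.
//
// #include "llvm/ADT/DepthFirstIterator.h"
// #include "llvm/IR/CFG.h"
// #include "llvm/IR/Function.h"
//
// static unsigned countReachableBlocks(llvm::Function &F) {
//   unsigned N = 0;
//   for (llvm::BasicBlock *BB : llvm::depth_first(&F)) {
//     (void)BB; // only blocks reachable from the entry block are visited
//     ++N;
//   }
//   return N;
// }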


@ -1,99 +0,0 @@
//===- llvm/ADT/EpochTracker.h - ADT epoch tracking --------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
// These can be used to write iterators that are fail-fast when LLVM is built
// with asserts enabled.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_EPOCH_TRACKER_H
#define LLVM_ADT_EPOCH_TRACKER_H
#include "llvm/Config/llvm-config.h"
#include <cstdint>
namespace llvm {
#ifndef LLVM_ENABLE_ABI_BREAKING_CHECKS
class DebugEpochBase {
public:
void incrementEpoch() {}
class HandleBase {
public:
HandleBase() = default;
explicit HandleBase(const DebugEpochBase *) {}
bool isHandleInSync() const { return true; }
const void *getEpochAddress() const { return nullptr; }
};
};
#else
/// \brief A base class for data structure classes wishing to make iterators
/// ("handles") pointing into themselves fail-fast. When building without
/// asserts, this class is empty and does nothing.
///
/// DebugEpochBase does not by itself track handles pointing into itself. The
/// expectation is that routines touching the handles will poll on
/// isHandleInSync at appropriate points to assert that the handle they're using
/// is still valid.
///
class DebugEpochBase {
uint64_t Epoch;
public:
DebugEpochBase() : Epoch(0) {}
/// \brief Calling incrementEpoch invalidates all handles pointing into the
/// calling instance.
void incrementEpoch() { ++Epoch; }
/// \brief The destructor calls incrementEpoch to make use-after-free bugs
/// more likely to crash deterministically.
~DebugEpochBase() { incrementEpoch(); }
/// \brief A base class for iterator classes ("handles") that wish to poll for
/// iterator invalidating modifications in the underlying data structure.
/// When LLVM is built without asserts, this class is empty and does nothing.
///
/// HandleBase does not track the parent data structure by itself. It expects
/// the routines modifying the data structure to call incrementEpoch when they
/// make an iterator-invalidating modification.
///
class HandleBase {
const uint64_t *EpochAddress;
uint64_t EpochAtCreation;
public:
HandleBase() : EpochAddress(nullptr), EpochAtCreation(UINT64_MAX) {}
explicit HandleBase(const DebugEpochBase *Parent)
: EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}
/// \brief Returns true if the DebugEpochBase this Handle is linked to has
/// not called incrementEpoch on itself since the creation of this
/// HandleBase instance.
bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }
/// \brief Returns a pointer to the epoch word stored in the data structure
/// this handle points into. Can be used to check if two iterators point
/// into the same data structure.
const void *getEpochAddress() const { return EpochAddress; }
};
};
#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS
} // namespace llvm
#endif
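
// Editor's example (a minimal sketch, not part of the original header): a toy
// container using the epoch machinery to make stale handles detectable. With
// LLVM_ENABLE_ABI_BREAKING_CHECKS enabled, the assert fires if clear() ran
// after the handle was taken; otherwise both base classes compile away to
// nothing. All names below are hypothetical.
//
// #include "llvm/ADT/EpochTracker.h"
// #include <cassert>
// #include <vector>
//
// class TinyBag : public llvm::DebugEpochBase {
//   std::vector<int> Data;
// public:
//   class Handle : public llvm::DebugEpochBase::HandleBase {
//   public:
//     explicit Handle(const TinyBag *Bag) : HandleBase(Bag) {}
//   };
//   Handle getHandle() const { return Handle(this); }
//   void push(int V) { Data.push_back(V); }
//   void clear() { Data.clear(); incrementEpoch(); } // invalidates handles
//   void use(const Handle &H) const {
//     assert(H.isHandleInSync() && "handle outlived a clearing modification");
//   }
// };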


@ -1,283 +0,0 @@
//===-- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Generic implementation of equivalence classes through the use of Tarjan's
// efficient union-find algorithm.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
#define LLVM_ADT_EQUIVALENCECLASSES_H
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstddef>
#include <set>
namespace llvm {
/// EquivalenceClasses - This represents a collection of equivalence classes and
/// supports three efficient operations: insert an element into a class of its
/// own, union two classes, and find the class for a given element. In
/// addition to these modification methods, it is possible to iterate over all
/// of the equivalence classes and all of the elements in a class.
///
/// This implementation is efficient: it stores only one copy of the element
/// being indexed per entry in the set, and it allows any arbitrary type to be
/// indexed (as long as it can be ordered with operator<).
///
/// Here is a simple example using integers:
///
/// \code
/// EquivalenceClasses<int> EC;
/// EC.unionSets(1, 2); // insert 1, 2 into the same set
/// EC.insert(4); EC.insert(5); // insert 4, 5 into own sets
/// EC.unionSets(5, 1); // merge the set for 1 with 5's set.
///
/// for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end();
/// I != E; ++I) { // Iterate over all of the equivalence sets.
/// if (!I->isLeader()) continue; // Ignore non-leader sets.
/// for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I);
/// MI != EC.member_end(); ++MI) // Loop over members in this set.
/// cerr << *MI << " "; // Print member.
/// cerr << "\n"; // Finish set.
/// }
/// \endcode
///
/// This example prints:
/// 4
/// 5 1 2
///
template <class ElemTy>
class EquivalenceClasses {
/// ECValue - The EquivalenceClasses data structure is just a set of these.
/// Each of these represents a relation for a value. First it stores the
/// value itself, which provides the ordering that the set queries. Next, it
/// provides a "next pointer", which is used to enumerate all of the elements
  /// in the unioned set. Finally, it defines either an "end of list" pointer or
  /// a "leader" pointer, depending on whether the value itself is a leader. A
  /// "leader" pointer points to the node that is the leader for this element,
  /// if the node is not a leader. An "end of list" pointer points to the last
  /// node in the list of members of this class. Whether or not a node is a
  /// leader is determined by a bit stolen from one of the pointers.
class ECValue {
friend class EquivalenceClasses;
mutable const ECValue *Leader, *Next;
ElemTy Data;
// ECValue ctor - Start out with EndOfList pointing to this node, Next is
// Null, isLeader = true.
ECValue(const ElemTy &Elt)
: Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {}
const ECValue *getLeader() const {
if (isLeader()) return this;
if (Leader->isLeader()) return Leader;
// Path compression.
return Leader = Leader->getLeader();
}
const ECValue *getEndOfList() const {
assert(isLeader() && "Cannot get the end of a list for a non-leader!");
return Leader;
}
void setNext(const ECValue *NewNext) const {
assert(getNext() == nullptr && "Already has a next pointer!");
Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
}
public:
ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
Data(RHS.Data) {
// Only support copying of singleton nodes.
assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!");
}
bool operator<(const ECValue &UFN) const { return Data < UFN.Data; }
bool isLeader() const { return (intptr_t)Next & 1; }
const ElemTy &getData() const { return Data; }
const ECValue *getNext() const {
return (ECValue*)((intptr_t)Next & ~(intptr_t)1);
}
template<typename T>
bool operator<(const T &Val) const { return Data < Val; }
};
/// TheMapping - This implicitly provides a mapping from ElemTy values to the
/// ECValues, it just keeps the key as part of the value.
std::set<ECValue> TheMapping;
public:
EquivalenceClasses() {}
EquivalenceClasses(const EquivalenceClasses &RHS) {
operator=(RHS);
}
const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) {
TheMapping.clear();
for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I)
if (I->isLeader()) {
member_iterator MI = RHS.member_begin(I);
member_iterator LeaderIt = member_begin(insert(*MI));
for (++MI; MI != member_end(); ++MI)
unionSets(LeaderIt, member_begin(insert(*MI)));
}
return *this;
}
//===--------------------------------------------------------------------===//
// Inspection methods
//
/// iterator* - Provides a way to iterate over all values in the set.
typedef typename std::set<ECValue>::const_iterator iterator;
iterator begin() const { return TheMapping.begin(); }
iterator end() const { return TheMapping.end(); }
bool empty() const { return TheMapping.empty(); }
/// member_* Iterate over the members of an equivalence class.
///
class member_iterator;
member_iterator member_begin(iterator I) const {
// Only leaders provide anything to iterate over.
return member_iterator(I->isLeader() ? &*I : nullptr);
}
member_iterator member_end() const {
return member_iterator(nullptr);
}
/// findValue - Return an iterator to the specified value. If it does not
/// exist, end() is returned.
iterator findValue(const ElemTy &V) const {
return TheMapping.find(V);
}
/// getLeaderValue - Return the leader for the specified value that is in the
/// set. It is an error to call this method for a value that is not yet in
/// the set. For that, call getOrInsertLeaderValue(V).
const ElemTy &getLeaderValue(const ElemTy &V) const {
member_iterator MI = findLeader(V);
assert(MI != member_end() && "Value is not in the set!");
return *MI;
}
/// getOrInsertLeaderValue - Return the leader for the specified value that is
/// in the set. If the member is not in the set, it is inserted, then
/// returned.
const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
member_iterator MI = findLeader(insert(V));
assert(MI != member_end() && "Value is not in the set!");
return *MI;
}
/// getNumClasses - Return the number of equivalence classes in this set.
/// Note that this is a linear time operation.
unsigned getNumClasses() const {
unsigned NC = 0;
for (iterator I = begin(), E = end(); I != E; ++I)
if (I->isLeader()) ++NC;
return NC;
}
//===--------------------------------------------------------------------===//
// Mutation methods
/// insert - Insert a new value into the union/find set, ignoring the request
/// if the value already exists.
iterator insert(const ElemTy &Data) {
return TheMapping.insert(ECValue(Data)).first;
}
/// findLeader - Given a value in the set, return a member iterator for the
/// equivalence class it is in. This does the path-compression part that
/// makes union-find "union findy". This returns an end iterator if the value
/// is not in the equivalence class.
///
member_iterator findLeader(iterator I) const {
if (I == TheMapping.end()) return member_end();
return member_iterator(I->getLeader());
}
member_iterator findLeader(const ElemTy &V) const {
return findLeader(TheMapping.find(V));
}
/// union - Merge the two equivalence sets for the specified values, inserting
/// them if they do not already exist in the equivalence set.
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) {
iterator V1I = insert(V1), V2I = insert(V2);
return unionSets(findLeader(V1I), findLeader(V2I));
}
member_iterator unionSets(member_iterator L1, member_iterator L2) {
assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!");
if (L1 == L2) return L1; // Unifying the same two sets, noop.
// Otherwise, this is a real union operation. Set the end of the L1 list to
// point to the L2 leader node.
const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node;
L1LV.getEndOfList()->setNext(&L2LV);
// Update L1LV's end of list pointer.
L1LV.Leader = L2LV.getEndOfList();
// Clear L2's leader flag:
L2LV.Next = L2LV.getNext();
// L2's leader is now L1.
L2LV.Leader = &L1LV;
return L1;
}
class member_iterator : public std::iterator<std::forward_iterator_tag,
const ElemTy, ptrdiff_t> {
typedef std::iterator<std::forward_iterator_tag,
const ElemTy, ptrdiff_t> super;
const ECValue *Node;
friend class EquivalenceClasses;
public:
typedef size_t size_type;
typedef typename super::pointer pointer;
typedef typename super::reference reference;
explicit member_iterator() {}
explicit member_iterator(const ECValue *N) : Node(N) {}
reference operator*() const {
assert(Node != nullptr && "Dereferencing end()!");
return Node->getData();
}
pointer operator->() const { return &operator*(); }
member_iterator &operator++() {
assert(Node != nullptr && "++'d off the end of the list!");
Node = Node->getNext();
return *this;
}
member_iterator operator++(int) { // postincrement operators.
member_iterator tmp = *this;
++*this;
return tmp;
}
bool operator==(const member_iterator &RHS) const {
return Node == RHS.Node;
}
bool operator!=(const member_iterator &RHS) const {
return Node != RHS.Node;
}
};
};
} // End llvm namespace
#endif
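
// Editor's example (a minimal sketch, not part of the original header): the
// leader-query side of the API, complementing the iteration example in the
// class comment above. Function name is hypothetical.
//
// #include "llvm/ADT/EquivalenceClasses.h"
//
// static bool sameClassDemo() {
//   llvm::EquivalenceClasses<int> EC;
//   EC.unionSets(1, 2);
//   EC.unionSets(2, 3); // 1, 2 and 3 now share a single leader
//   return EC.getLeaderValue(1) == EC.getLeaderValue(3); // true
// }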


@ -1,764 +0,0 @@
//===-- llvm/ADT/FoldingSet.h - Uniquing Hash Set ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a hash set that can be used to remove duplication of nodes
// in a graph. This code was originally created by Chris Lattner for use with
// SelectionDAGCSEMap, but was isolated so that it could be used across the
// LLVM codebase.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_FOLDINGSET_H
#define LLVM_ADT_FOLDINGSET_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Allocator.h"
namespace llvm {
/// This folding set is used for two purposes:
/// 1. Given information about a node we want to create, look up the unique
/// instance of the node in the set. If the node already exists, return
/// it, otherwise return the bucket it should be inserted into.
/// 2. Given a node that has already been created, remove it from the set.
///
/// This class is implemented as a single-link chained hash table, where the
/// "buckets" are actually the nodes themselves (the next pointer is in the
/// node). The last node points back to the bucket to simplify node removal.
///
/// Any node that is to be included in the folding set must be a subclass of
/// FoldingSetNode. The node class must also define a Profile method used to
/// establish the unique bits of data for the node. The Profile method is
/// passed a FoldingSetNodeID object which is used to gather the bits. Just
/// call one of the Add* functions defined in the FoldingSetNodeID class.
/// NOTE: The folding set does not own the nodes, and it is the
/// responsibility of the user to dispose of the nodes.
///
/// Eg.
/// class MyNode : public FoldingSetNode {
/// private:
/// std::string Name;
/// unsigned Value;
/// public:
/// MyNode(const char *N, unsigned V) : Name(N), Value(V) {}
/// ...
/// void Profile(FoldingSetNodeID &ID) const {
/// ID.AddString(Name);
/// ID.AddInteger(Value);
/// }
/// ...
/// };
///
/// To define the folding set itself use the FoldingSet template;
///
/// Eg.
/// FoldingSet<MyNode> MyFoldingSet;
///
/// Four public methods are available to manipulate the folding set;
///
/// 1) If you have an existing node that you want to add to the set but are
///    unsure whether the node already exists, then call;
///
/// MyNode *M = MyFoldingSet.GetOrInsertNode(N);
///
/// If the result is equal to the input then the node has been inserted.
/// Otherwise, the result is the node existing in the folding set, and the
/// input can be discarded (use the result instead.)
///
/// 2) If you are ready to construct a node but want to check if it already
/// exists, then call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to
/// check;
///
/// FoldingSetNodeID ID;
/// ID.AddString(Name);
/// ID.AddInteger(Value);
/// void *InsertPoint;
///
/// MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
///
/// If found then M will be non-NULL, else InsertPoint will point to where it
/// should be inserted using InsertNode.
///
/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert a
///    new node at the insertion point with InsertNode;
///
/// InsertNode(N, InsertPoint);
///
/// 4) Finally, if you want to remove a node from the folding set call;
///
/// bool WasRemoved = RemoveNode(N);
///
/// The result indicates whether the node existed in the folding set.
class FoldingSetNodeID;
class StringRef;
//===----------------------------------------------------------------------===//
/// FoldingSetImpl - Implements the folding set functionality. The main
/// structure is an array of buckets. Each bucket is indexed by the hash of
/// the nodes it contains. The bucket itself points to the nodes contained
/// in the bucket via a singly linked list. The last node in the list points
/// back to the bucket to facilitate node removal.
///
class FoldingSetImpl {
virtual void anchor(); // Out of line virtual method.
protected:
/// Buckets - Array of bucket chains.
///
void **Buckets;
/// NumBuckets - Length of the Buckets array. Always a power of 2.
///
unsigned NumBuckets;
/// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes
/// is greater than twice the number of buckets.
unsigned NumNodes;
explicit FoldingSetImpl(unsigned Log2InitSize = 6);
FoldingSetImpl(FoldingSetImpl &&Arg);
FoldingSetImpl &operator=(FoldingSetImpl &&RHS);
~FoldingSetImpl();
public:
//===--------------------------------------------------------------------===//
/// Node - This class is used to maintain the singly linked bucket list in
/// a folding set.
///
class Node {
private:
// NextInFoldingSetBucket - next link in the bucket list.
void *NextInFoldingSetBucket;
public:
Node() : NextInFoldingSetBucket(nullptr) {}
// Accessors
void *getNextInBucket() const { return NextInFoldingSetBucket; }
void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; }
};
/// clear - Remove all nodes from the folding set.
void clear();
/// RemoveNode - Remove a node from the folding set, returning true if one
/// was removed or false if the node was not in the folding set.
bool RemoveNode(Node *N);
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and return
/// it instead.
Node *GetOrInsertNode(Node *N);
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void InsertNode(Node *N, void *InsertPos);
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set.
void InsertNode(Node *N) {
Node *Inserted = GetOrInsertNode(N);
(void)Inserted;
assert(Inserted == N && "Node already inserted!");
}
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return NumNodes; }
/// empty - Returns true if there are no nodes in the folding set.
bool empty() const { return NumNodes == 0; }
/// reserve - Increase the number of buckets such that adding the
/// EltCount-th node won't cause a rebucket operation. reserve is permitted
/// to allocate more space than requested by EltCount.
void reserve(unsigned EltCount);
/// capacity - Returns the number of nodes permitted in the folding set
/// before a rebucket operation is performed.
unsigned capacity() {
// We allow a load factor of up to 2.0,
// so that means our capacity is NumBuckets * 2
return NumBuckets * 2;
}
private:
/// GrowHashTable - Double the size of the hash table and rehash everything.
void GrowHashTable();
/// GrowBucketCount - resize the hash table and rehash everything.
/// NewBucketCount must be a power of two, and must be greater than the old
/// bucket count.
void GrowBucketCount(unsigned NewBucketCount);
protected:
/// GetNodeProfile - Instantiations of the FoldingSet template implement
/// this function to gather data bits for the given node.
virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
/// NodeEquals - Instantiations of the FoldingSet template implement
/// this function to compare the given node with the given ID.
virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) const=0;
/// ComputeNodeHash - Instantiations of the FoldingSet template implement
/// this function to compute a hash value for the given node.
virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
};
//===----------------------------------------------------------------------===//
template<typename T> struct FoldingSetTrait;
/// DefaultFoldingSetTrait - This class provides default implementations
/// for FoldingSetTrait implementations.
///
template<typename T> struct DefaultFoldingSetTrait {
static void Profile(const T &X, FoldingSetNodeID &ID) {
X.Profile(ID);
}
static void Profile(T &X, FoldingSetNodeID &ID) {
X.Profile(ID);
}
// Equals - Test if the profile for X would match ID, using TempID
// to compute a temporary ID if necessary. The default implementation
// just calls Profile and does a regular comparison. Implementations
// can override this to provide more efficient implementations.
static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID);
// ComputeHash - Compute a hash value for X, using TempID to
// compute a temporary ID if necessary. The default implementation
// just calls Profile and does a regular hash computation.
// Implementations can override this to provide more efficient
// implementations.
static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
};
/// FoldingSetTrait - This trait class is used to define behavior of how
/// to "profile" (in the FoldingSet parlance) an object of a given type.
/// The default behavior is to invoke a 'Profile' method on an object, but
/// through template specialization the behavior can be tailored for specific
/// types. Combined with the FoldingSetNodeWrapper class, one can add objects
/// to FoldingSets that were not originally designed to have that behavior.
template<typename T> struct FoldingSetTrait
: public DefaultFoldingSetTrait<T> {};
template<typename T, typename Ctx> struct ContextualFoldingSetTrait;
/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
/// for ContextualFoldingSets.
template<typename T, typename Ctx>
struct DefaultContextualFoldingSetTrait {
static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
X.Profile(ID, Context);
}
static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID, Ctx Context);
static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
Ctx Context);
};
/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
/// ContextualFoldingSets.
template<typename T, typename Ctx> struct ContextualFoldingSetTrait
: public DefaultContextualFoldingSetTrait<T, Ctx> {};
//===--------------------------------------------------------------------===//
/// FoldingSetNodeIDRef - This class describes a reference to an interned
/// FoldingSetNodeID, which can be useful for storing node id data rather
/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
/// is often much larger than necessary, and the possibility of heap
/// allocation means it requires a non-trivial destructor call.
class FoldingSetNodeIDRef {
const unsigned *Data;
size_t Size;
public:
FoldingSetNodeIDRef() : Data(nullptr), Size(0) {}
FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
/// used to lookup the node in the FoldingSetImpl.
unsigned ComputeHash() const;
bool operator==(FoldingSetNodeIDRef) const;
bool operator!=(FoldingSetNodeIDRef RHS) const { return !(*this == RHS); }
/// Used to compare the "ordering" of two nodes as defined by the
/// profiled bits and their ordering defined by memcmp().
bool operator<(FoldingSetNodeIDRef) const;
const unsigned *getData() const { return Data; }
size_t getSize() const { return Size; }
};
//===--------------------------------------------------------------------===//
/// FoldingSetNodeID - This class is used to gather all the unique data bits of
/// a node. When all the bits are gathered this class is used to produce a
/// hash value for the node.
///
class FoldingSetNodeID {
/// Bits - Vector of all the data bits that make the node unique.
/// Use a SmallVector to avoid a heap allocation in the common case.
SmallVector<unsigned, 32> Bits;
public:
FoldingSetNodeID() {}
FoldingSetNodeID(FoldingSetNodeIDRef Ref)
: Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}
/// Add* - Add various data types to Bit data.
///
void AddPointer(const void *Ptr);
void AddInteger(signed I);
void AddInteger(unsigned I);
void AddInteger(long I);
void AddInteger(unsigned long I);
void AddInteger(long long I);
void AddInteger(unsigned long long I);
void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
void AddString(StringRef String);
void AddNodeID(const FoldingSetNodeID &ID);
template <typename T>
inline void Add(const T &x) { FoldingSetTrait<T>::Profile(x, *this); }
/// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
/// object to be used to compute a new profile.
inline void clear() { Bits.clear(); }
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
/// to lookup the node in the FoldingSetImpl.
unsigned ComputeHash() const;
/// operator== - Used to compare two nodes to each other.
///
bool operator==(const FoldingSetNodeID &RHS) const;
bool operator==(const FoldingSetNodeIDRef RHS) const;
bool operator!=(const FoldingSetNodeID &RHS) const { return !(*this == RHS); }
bool operator!=(const FoldingSetNodeIDRef RHS) const { return !(*this ==RHS);}
/// Used to compare the "ordering" of two nodes as defined by the
/// profiled bits and their ordering defined by memcmp().
bool operator<(const FoldingSetNodeID &RHS) const;
bool operator<(const FoldingSetNodeIDRef RHS) const;
/// Intern - Copy this node's data to a memory region allocated from the
/// given allocator and return a FoldingSetNodeIDRef describing the
/// interned data.
FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
};
// Convenience type to hide the implementation of the folding set.
typedef FoldingSetImpl::Node FoldingSetNode;
template<class T> class FoldingSetIterator;
template<class T> class FoldingSetBucketIterator;
// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
// require the definition of FoldingSetNodeID.
template<typename T>
inline bool
DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
unsigned /*IDHash*/,
FoldingSetNodeID &TempID) {
FoldingSetTrait<T>::Profile(X, TempID);
return TempID == ID;
}
template<typename T>
inline unsigned
DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
FoldingSetTrait<T>::Profile(X, TempID);
return TempID.ComputeHash();
}
template<typename T, typename Ctx>
inline bool
DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
const FoldingSetNodeID &ID,
unsigned /*IDHash*/,
FoldingSetNodeID &TempID,
Ctx Context) {
ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
return TempID == ID;
}
template<typename T, typename Ctx>
inline unsigned
DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
FoldingSetNodeID &TempID,
Ctx Context) {
ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
return TempID.ComputeHash();
}
//===----------------------------------------------------------------------===//
/// FoldingSet - This template class is used to instantiate a specialized
/// implementation of the folding set to the node class T. T must be a
/// subclass of FoldingSetNode and implement a Profile function.
///
/// Note that this set type is movable and move-assignable. However, its
/// moved-from state is not a valid state for anything other than
/// move-assigning and destroying. This is primarily to enable movable APIs
/// that incorporate these objects.
template <class T> class FoldingSet final : public FoldingSetImpl {
private:
  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
T *TN = static_cast<T *>(N);
FoldingSetTrait<T>::Profile(*TN, ID);
}
/// NodeEquals - Instantiations may optionally provide a way to compare a
/// node with a specified ID.
bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
}
/// ComputeNodeHash - Instantiations may optionally provide a way to compute a
/// hash value directly from a node.
unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
}
public:
explicit FoldingSet(unsigned Log2InitSize = 6)
: FoldingSetImpl(Log2InitSize) {}
FoldingSet(FoldingSet &&Arg) : FoldingSetImpl(std::move(Arg)) {}
FoldingSet &operator=(FoldingSet &&RHS) {
(void)FoldingSetImpl::operator=(std::move(RHS));
return *this;
}
typedef FoldingSetIterator<T> iterator;
iterator begin() { return iterator(Buckets); }
iterator end() { return iterator(Buckets+NumBuckets); }
typedef FoldingSetIterator<const T> const_iterator;
const_iterator begin() const { return const_iterator(Buckets); }
const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
typedef FoldingSetBucketIterator<T> bucket_iterator;
bucket_iterator bucket_begin(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
}
bucket_iterator bucket_end(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
}
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and
/// return it instead.
T *GetOrInsertNode(Node *N) {
return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
}
};
//===----------------------------------------------------------------------===//
/// ContextualFoldingSet - This template class is a further refinement
/// of FoldingSet which provides a context argument when calling
/// Profile on its nodes. Currently, that argument is fixed at
/// initialization time.
///
/// T must be a subclass of FoldingSetNode and implement a Profile
/// function with signature
/// void Profile(llvm::FoldingSetNodeID &, Ctx);
template <class T, class Ctx>
class ContextualFoldingSet final : public FoldingSetImpl {
// Unfortunately, this can't derive from FoldingSet<T> because the
// construction vtable for FoldingSet<T> requires
// FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
// requires a single-argument T::Profile().
private:
Ctx Context;
  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
void GetNodeProfile(FoldingSetImpl::Node *N,
FoldingSetNodeID &ID) const override {
T *TN = static_cast<T *>(N);
ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
}
bool NodeEquals(FoldingSetImpl::Node *N, const FoldingSetNodeID &ID,
unsigned IDHash, FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
Context);
}
unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
}
public:
explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
: FoldingSetImpl(Log2InitSize), Context(Context)
{}
Ctx getContext() const { return Context; }
typedef FoldingSetIterator<T> iterator;
iterator begin() { return iterator(Buckets); }
iterator end() { return iterator(Buckets+NumBuckets); }
typedef FoldingSetIterator<const T> const_iterator;
const_iterator begin() const { return const_iterator(Buckets); }
const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
typedef FoldingSetBucketIterator<T> bucket_iterator;
bucket_iterator bucket_begin(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
}
bucket_iterator bucket_end(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
}
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N'
/// and return it instead.
T *GetOrInsertNode(Node *N) {
return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it
/// exists, return it. If not, return the insertion token that will
/// make insertion faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
}
};
//===----------------------------------------------------------------------===//
/// FoldingSetVector - This template class combines a FoldingSet and a vector
/// to provide the interface of FoldingSet but with deterministic iteration
/// order based on the insertion order. T must be a subclass of FoldingSetNode
/// and implement a Profile function.
template <class T, class VectorT = SmallVector<T*, 8> >
class FoldingSetVector {
FoldingSet<T> Set;
VectorT Vector;
public:
explicit FoldingSetVector(unsigned Log2InitSize = 6)
: Set(Log2InitSize) {
}
typedef pointee_iterator<typename VectorT::iterator> iterator;
iterator begin() { return Vector.begin(); }
iterator end() { return Vector.end(); }
typedef pointee_iterator<typename VectorT::const_iterator> const_iterator;
const_iterator begin() const { return Vector.begin(); }
const_iterator end() const { return Vector.end(); }
/// clear - Remove all nodes from the folding set.
void clear() { Set.clear(); Vector.clear(); }
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
return Set.FindNodeOrInsertPos(ID, InsertPos);
}
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and
/// return it instead.
T *GetOrInsertNode(T *N) {
T *Result = Set.GetOrInsertNode(N);
if (Result == N) Vector.push_back(N);
return Result;
}
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void InsertNode(T *N, void *InsertPos) {
Set.InsertNode(N, InsertPos);
Vector.push_back(N);
}
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set.
void InsertNode(T *N) {
Set.InsertNode(N);
Vector.push_back(N);
}
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return Set.size(); }
/// empty - Returns true if there are no nodes in the folding set.
bool empty() const { return Set.empty(); }
};
//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
protected:
FoldingSetNode *NodePtr;
FoldingSetIteratorImpl(void **Bucket);
void advance();
public:
bool operator==(const FoldingSetIteratorImpl &RHS) const {
return NodePtr == RHS.NodePtr;
}
bool operator!=(const FoldingSetIteratorImpl &RHS) const {
return NodePtr != RHS.NodePtr;
}
};
template <class T> class FoldingSetIterator : public FoldingSetIteratorImpl {
public:
explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {}
T &operator*() const {
return *static_cast<T*>(NodePtr);
}
T *operator->() const {
return static_cast<T*>(NodePtr);
}
inline FoldingSetIterator &operator++() { // Preincrement
advance();
return *this;
}
FoldingSetIterator operator++(int) { // Postincrement
FoldingSetIterator tmp = *this; ++*this; return tmp;
}
};
//===----------------------------------------------------------------------===//
/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
/// shared by all folding sets, which knows how to walk a particular bucket
/// of a folding set hash table.
class FoldingSetBucketIteratorImpl {
protected:
void *Ptr;
explicit FoldingSetBucketIteratorImpl(void **Bucket);
FoldingSetBucketIteratorImpl(void **Bucket, bool)
: Ptr(Bucket) {}
void advance() {
void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket();
uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1;
Ptr = reinterpret_cast<void*>(x);
}
public:
bool operator==(const FoldingSetBucketIteratorImpl &RHS) const {
return Ptr == RHS.Ptr;
}
bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const {
return Ptr != RHS.Ptr;
}
};
template <class T>
class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl {
public:
explicit FoldingSetBucketIterator(void **Bucket) :
FoldingSetBucketIteratorImpl(Bucket) {}
FoldingSetBucketIterator(void **Bucket, bool) :
FoldingSetBucketIteratorImpl(Bucket, true) {}
T &operator*() const { return *static_cast<T*>(Ptr); }
T *operator->() const { return static_cast<T*>(Ptr); }
inline FoldingSetBucketIterator &operator++() { // Preincrement
advance();
return *this;
}
FoldingSetBucketIterator operator++(int) { // Postincrement
FoldingSetBucketIterator tmp = *this; ++*this; return tmp;
}
};
//===----------------------------------------------------------------------===//
/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary
/// types in an enclosing object so that they can be inserted into FoldingSets.
template <typename T>
class FoldingSetNodeWrapper : public FoldingSetNode {
T data;
public:
template <typename... Ts>
explicit FoldingSetNodeWrapper(Ts &&... Args)
: data(std::forward<Ts>(Args)...) {}
void Profile(FoldingSetNodeID &ID) { FoldingSetTrait<T>::Profile(data, ID); }
T &getValue() { return data; }
const T &getValue() const { return data; }
operator T&() { return data; }
operator const T&() const { return data; }
};
//===----------------------------------------------------------------------===//
/// FastFoldingSetNode - This is a subclass of FoldingSetNode which stores
/// a FoldingSetNodeID value rather than requiring the node to recompute it
/// each time it is needed. This trades space for speed (which can be
/// significant if the ID is long), and it also permits nodes to drop
/// information that would otherwise only be required for recomputing an ID.
class FastFoldingSetNode : public FoldingSetNode {
FoldingSetNodeID FastID;
protected:
explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}
public:
void Profile(FoldingSetNodeID &ID) const { ID.AddNodeID(FastID); }
};
//===----------------------------------------------------------------------===//
// Partial specializations of FoldingSetTrait.
template<typename T> struct FoldingSetTrait<T*> {
static inline void Profile(T *X, FoldingSetNodeID &ID) {
ID.AddPointer(X);
}
};
template <typename T1, typename T2>
struct FoldingSetTrait<std::pair<T1, T2>> {
static inline void Profile(const std::pair<T1, T2> &P,
llvm::FoldingSetNodeID &ID) {
ID.Add(P.first);
ID.Add(P.second);
}
};
} // End of namespace llvm.
#endif

View File

@ -1,111 +0,0 @@
//===-- llvm/ADT/GraphTraits.h - Graph traits template ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the little GraphTraits<X> template class that should be
// specialized by classes that want to be iterable by generic graph iterators.
//
// This file also defines the marker class Inverse that is used to iterate over
// graphs in a graph-defined, inverse ordering...
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_GRAPHTRAITS_H
#define LLVM_ADT_GRAPHTRAITS_H
namespace llvm {
// GraphTraits - This class should be specialized by different graph types...
// which is why the default version is empty.
//
template<class GraphType>
struct GraphTraits {
// Elements to provide:
// NOTICE: We are in a transition from interfaces that require NodeType *
// to interfaces that use NodeRef. NodeRef is required to be cheap to copy,
// but does not have to be a raw pointer. During the transition, users
// should define both NodeType and NodeRef = NodeType *.
//
// typedef NodeType - Type of Node in the graph
// typedef NodeRef - NodeType *
// typedef ChildIteratorType - Type used to iterate over children in graph
// static NodeRef getEntryNode(const GraphType &)
// Return the entry node of the graph
// static ChildIteratorType child_begin(NodeRef)
// static ChildIteratorType child_end (NodeRef)
// Return iterators that point to the beginning and ending of the child
// node list for the specified node.
//
// typedef ...iterator nodes_iterator;
// static nodes_iterator nodes_begin(GraphType *G)
// static nodes_iterator nodes_end (GraphType *G)
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
// static unsigned size (GraphType *G)
// Return total number of nodes in the graph
//
// If anyone tries to use this class without having an appropriate
// specialization, the typedef below triggers a compile error. If you get
// this error, it's because you need to include the appropriate
// specialization of GraphTraits<> for your graph, or you need to define one
// for a new graph type (a minimal sketch of a specialization follows this
// struct). Either that or your argument to XXX_begin(...) is of an unknown
// type or needs to have the proper .h file #include'd.
//
typedef typename GraphType::UnknownGraphTypeError NodeRef;
};
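// For illustration, a minimal sketch of a specialization (SimpleGraph and
// SimpleNode are hypothetical types):
//
//   struct SimpleNode { std::vector<SimpleNode *> Succs; };
//   struct SimpleGraph { SimpleNode *Entry; };
//
//   template <> struct GraphTraits<SimpleGraph *> {
//     typedef SimpleNode NodeType;
//     typedef SimpleNode *NodeRef;
//     typedef std::vector<SimpleNode *>::iterator ChildIteratorType;
//     static NodeRef getEntryNode(SimpleGraph *G) { return G->Entry; }
//     static ChildIteratorType child_begin(NodeRef N) {
//       return N->Succs.begin();
//     }
//     static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
//   };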
// Inverse - This class is used as a little marker class to tell the graph
// iterator to iterate over the graph in a graph defined "Inverse" ordering.
// Not all graphs define an inverse ordering, and if they do, it depends on
// the graph exactly what that is. Here's an example of usage with the
// df_iterator:
//
// idf_iterator<Method*> I = idf_begin(M), E = idf_end(M);
// for (; I != E; ++I) { ... }
//
// Which is equivalent to:
// df_iterator<Inverse<Method*> > I = idf_begin(M), E = idf_end(M);
// for (; I != E; ++I) { ... }
//
template <class GraphType>
struct Inverse {
const GraphType &Graph;
inline Inverse(const GraphType &G) : Graph(G) {}
};
// Provide a partial specialization of GraphTraits so that the inverse of an
// inverse falls back to the original graph.
template<class T>
struct GraphTraits<Inverse<Inverse<T> > > {
typedef typename GraphTraits<T>::NodeType NodeType;
typedef typename GraphTraits<T>::ChildIteratorType ChildIteratorType;
static NodeType *getEntryNode(Inverse<Inverse<T> > *G) {
return GraphTraits<T>::getEntryNode(G->Graph.Graph);
}
static ChildIteratorType child_begin(NodeType* N) {
return GraphTraits<T>::child_begin(N);
}
static ChildIteratorType child_end(NodeType* N) {
return GraphTraits<T>::child_end(N);
}
};
} // End llvm namespace
#endif


@ -1,661 +0,0 @@
//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the newly proposed standard C++ interfaces for hashing
// arbitrary data and building hash functions for user-defined types. This
// interface was originally proposed in N3333[1] and is currently under review
// for inclusion in a future TR and/or standard.
//
// The primary interfaces provided comprise one type and three functions:
//
// -- 'hash_code' class is an opaque type representing the hash code for some
// data. It is the intended product of hashing, and can be used to implement
// hash tables, checksumming, and other common uses of hashes. It is not an
// integer type (although it can be converted to one) because it is risky
// to assume much about the internals of a hash_code. In particular, each
// execution of the program has a high probability of producing a different
// hash_code for a given input. Thus their values are not stable across
// runs, should never be saved or persisted, and should only be used
// within a single execution to build hashing data structures.
//
// -- 'hash_value' is a function designed to be overloaded for each
// user-defined type which wishes to be used within a hashing context. It
// should be overloaded within the user-defined type's namespace and found
// via ADL. Overloads for primitive types are provided by this library.
//
// -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
// programmers in easily and intuitively combining a set of data into
// a single hash_code for their object. They should only logically be used
// within the implementation of a 'hash_value' routine or similar context.
//
// Note that 'hash_combine_range' contains very special logic for hashing
// a contiguous array of integers or pointers. This logic is *extremely*
// fast: on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these
// routines were benchmarked at over 6.5 GiB/s for large keys, and <20
// cycles/hash for keys under 32 bytes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <string>
#include <utility>
namespace llvm {
/// \brief An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
/// using llvm::hash_value;
/// llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
size_t value;
public:
/// \brief Default construct a hash_code.
/// Note that this leaves the value uninitialized.
hash_code() = default;
/// \brief Form a hash code directly from a numerical value.
hash_code(size_t value) : value(value) {}
/// \brief Convert the hash code to its numerical value for use.
/*explicit*/ operator size_t() const { return value; }
friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
return lhs.value == rhs.value;
}
friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
return lhs.value != rhs.value;
}
/// \brief Allow a hash_code to be directly run through hash_value.
friend size_t hash_value(const hash_code &code) { return code.value; }
};
/// \brief Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
/// contrast to hash_combine which may produce different hash_codes for
/// differing argument types even if they would implicit promote to a common
/// type without changing the value.
template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value);
/// \brief Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
template <typename T> hash_code hash_value(const T *ptr);
/// \brief Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);
/// \brief Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);
/// \brief Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
/// attackable and to ensure that output which is intended to be stable does
/// not rely on the particulars of the hash codes produced.
///
/// That said, there are use cases where it is important to be able to
/// reproduce *exactly* a specific behavior. To that end, we provide a function
/// which will forcibly set the seed to a fixed value. This must be done at the
/// start of the program, before any hashes are computed. Also, it cannot be
/// undone. This makes it thread-hostile and very hard to use outside of
/// immediately on start of a simple program designed for reproducible
/// behavior.
void set_fixed_execution_hash_seed(size_t fixed_value);
// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {
inline uint64_t fetch64(const char *p) {
uint64_t result;
memcpy(&result, p, sizeof(result));
if (sys::IsBigEndianHost)
sys::swapByteOrder(result);
return result;
}
inline uint32_t fetch32(const char *p) {
uint32_t result;
memcpy(&result, p, sizeof(result));
if (sys::IsBigEndianHost)
sys::swapByteOrder(result);
return result;
}
/// Some primes between 2^63 and 2^64 for various uses.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;
/// \brief Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
// Avoid shifting by 64: doing so yields an undefined result.
return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}
inline uint64_t shift_mix(uint64_t val) {
return val ^ (val >> 47);
}
inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
// Murmur-inspired hashing.
const uint64_t kMul = 0x9ddfea08eb382d69ULL;
uint64_t a = (low ^ high) * kMul;
a ^= (a >> 47);
uint64_t b = (high ^ a) * kMul;
b ^= (b >> 47);
b *= kMul;
return b;
}
inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
uint8_t a = s[0];
uint8_t b = s[len >> 1];
uint8_t c = s[len - 1];
uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
uint32_t z = len + (static_cast<uint32_t>(c) << 2);
return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}
inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
uint64_t a = fetch32(s);
return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}
inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
uint64_t a = fetch64(s);
uint64_t b = fetch64(s + len - 8);
return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}
inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
uint64_t a = fetch64(s) * k1;
uint64_t b = fetch64(s + 8);
uint64_t c = fetch64(s + len - 8) * k2;
uint64_t d = fetch64(s + len - 16) * k0;
return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
a + rotate(b ^ k3, 20) - c + len + seed);
}
inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
uint64_t z = fetch64(s + 24);
uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
uint64_t b = rotate(a + z, 52);
uint64_t c = rotate(a, 37);
a += fetch64(s + 8);
c += rotate(a, 7);
a += fetch64(s + 16);
uint64_t vf = a + z;
uint64_t vs = b + rotate(a, 31) + c;
a = fetch64(s + 16) + fetch64(s + len - 32);
z = fetch64(s + len - 8);
b = rotate(a + z, 52);
c = rotate(a, 37);
a += fetch64(s + len - 24);
c += rotate(a, 7);
a += fetch64(s + len - 16);
uint64_t wf = a + z;
uint64_t ws = b + rotate(a, 31) + c;
uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
return shift_mix((seed ^ (r * k0)) + vs) * k2;
}
inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
if (length >= 4 && length <= 8)
return hash_4to8_bytes(s, length, seed);
if (length > 8 && length <= 16)
return hash_9to16_bytes(s, length, seed);
if (length > 16 && length <= 32)
return hash_17to32_bytes(s, length, seed);
if (length > 32)
return hash_33to64_bytes(s, length, seed);
if (length != 0)
return hash_1to3_bytes(s, length, seed);
return k2 ^ seed;
}
/// \brief The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
uint64_t h0, h1, h2, h3, h4, h5, h6;
/// \brief Create a new hash_state structure and initialize it based on the
/// seed and the first 64-byte chunk.
/// This effectively performs the initial mix.
static hash_state create(const char *s, uint64_t seed) {
hash_state state = {
0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
seed * k1, shift_mix(seed), 0 };
state.h6 = hash_16_bytes(state.h4, state.h5);
state.mix(s);
return state;
}
/// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a'
/// and 'b', including whatever is already in 'a' and 'b'.
static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
a += fetch64(s);
uint64_t c = fetch64(s + 24);
b = rotate(b + a + c, 21);
uint64_t d = a;
a += fetch64(s + 8) + fetch64(s + 16);
b += rotate(a, 44) + d;
a += c;
}
/// \brief Mix in a 64-byte buffer of data.
/// We mix all 64 bytes even when the chunk length is smaller, but we
/// record the actual length.
void mix(const char *s) {
h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
h0 ^= h6;
h1 += h3 + fetch64(s + 40);
h2 = rotate(h2 + h5, 33) * k1;
h3 = h4 * k1;
h4 = h0 + h5;
mix_32_bytes(s, h3, h4);
h5 = h2 + h6;
h6 = h1 + fetch64(s + 16);
mix_32_bytes(s + 32, h5, h6);
std::swap(h2, h0);
}
/// \brief Compute the final 64-bit hash code value based on the current
/// state and the length of bytes hashed.
uint64_t finalize(size_t length) {
return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
}
};
/// \brief A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
extern size_t fixed_seed_override;
inline size_t get_execution_seed() {
// FIXME: This needs to be a per-execution seed. This is just a placeholder
// implementation. Switching to a per-execution seed is likely to flush out
// instability bugs and so will happen as its own commit.
//
// However, if there is a fixed seed override set the first time this is
// called, return that instead of the per-execution seed.
const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
static size_t seed = fixed_seed_override ? fixed_seed_override
: (size_t)seed_prime;
return seed;
}
/// \brief Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
template <typename T> struct is_hashable_data
: std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
std::is_pointer<T>::value) &&
64 % sizeof(T) == 0)> {};
// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
: std::integral_constant<bool, (is_hashable_data<T>::value &&
is_hashable_data<U>::value &&
(sizeof(T) + sizeof(U)) ==
sizeof(std::pair<T, U>))> {};
/// \brief Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
typename std::enable_if<is_hashable_data<T>::value, T>::type
get_hashable_data(const T &value) {
return value;
}
/// \brief Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
get_hashable_data(const T &value) {
using ::llvm::hash_value;
return hash_value(value);
}
/// \brief Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
size_t offset = 0) {
size_t store_size = sizeof(value) - offset;
if (buffer_ptr + store_size > buffer_end)
return false;
const char *value_data = reinterpret_cast<const char *>(&value);
memcpy(buffer_ptr, value_data + offset, store_size);
buffer_ptr += store_size;
return true;
}
/// \brief Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is
/// integral. Rather than computing a hash_code for each object and then
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
const size_t seed = get_execution_seed();
char buffer[64], *buffer_ptr = buffer;
char *const buffer_end = std::end(buffer);
while (first != last && store_and_advance(buffer_ptr, buffer_end,
get_hashable_data(*first)))
++first;
if (first == last)
return hash_short(buffer, buffer_ptr - buffer, seed);
assert(buffer_ptr == buffer_end);
hash_state state = hash_state::create(buffer, seed);
size_t length = 64;
while (first != last) {
// Fill up the buffer. We don't clear it, which re-mixes the last round
// when only a partial 64-byte chunk is left.
buffer_ptr = buffer;
while (first != last && store_and_advance(buffer_ptr, buffer_end,
get_hashable_data(*first)))
++first;
// Rotate the buffer if we did a partial fill in order to simulate doing
// a mix of the last 64 bytes. That is how the algorithm works when we
// have a contiguous byte sequence, and we want to emulate that here.
std::rotate(buffer, buffer_ptr, buffer_end);
// Mix this chunk into the current state.
state.mix(buffer);
length += buffer_ptr - buffer;
}
return state.finalize(length);
}
/// \brief Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
/// a hash_code for each object and then combining them, this (as an
/// optimization) directly combines the integers. Also, because the integers
/// are stored in contiguous memory, this routine avoids copying each value
/// and directly reads from the underlying memory.
template <typename ValueT>
typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
hash_combine_range_impl(ValueT *first, ValueT *last) {
const size_t seed = get_execution_seed();
const char *s_begin = reinterpret_cast<const char *>(first);
const char *s_end = reinterpret_cast<const char *>(last);
const size_t length = std::distance(s_begin, s_end);
if (length <= 64)
return hash_short(s_begin, length, seed);
const char *s_aligned_end = s_begin + (length & ~63);
hash_state state = hash_state::create(s_begin, seed);
s_begin += 64;
while (s_begin != s_aligned_end) {
state.mix(s_begin);
s_begin += 64;
}
if (length & 63)
state.mix(s_end - 64);
return state.finalize(length);
}
} // namespace detail
} // namespace hashing
/// \brief Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
/// and is significantly faster given pointers and types which can be hashed as
/// a sequence of bytes.
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
}
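// For example, hashing the elements of a contiguous buffer (a minimal
// sketch; the values are arbitrary):
//
//   int data[] = {1, 2, 3, 4};
//   llvm::hash_code code = llvm::hash_combine_range(data, data + 4);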
// Implementation details for hash_combine.
namespace hashing {
namespace detail {
/// \brief Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
/// recursive combining of arguments used in hash_combine. It is particularly
/// useful at minimizing the code in the recursive calls to ease the pain
/// caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
char buffer[64];
hash_state state;
const size_t seed;
public:
/// \brief Construct a recursive hash combining helper.
///
/// This sets up the state for a recursive hash combine, including getting
/// the seed and buffer setup.
hash_combine_recursive_helper()
: seed(get_execution_seed()) {}
/// \brief Combine one chunk of data into the current in-flight hash.
///
/// This merges one chunk of data into the hash. First it tries to buffer
/// the data. If the buffer is full, it hashes the buffer into its
/// hash_state, empties it, and then merges the new chunk in. This also
/// handles cases where the data straddles the end of the buffer.
template <typename T>
char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
if (!store_and_advance(buffer_ptr, buffer_end, data)) {
// Check for skew which prevents the buffer from being packed, and do
// a partial store into the buffer to fill it. This is only a concern
// with the variadic combine because that formation can have varying
// argument types.
size_t partial_store_size = buffer_end - buffer_ptr;
memcpy(buffer_ptr, &data, partial_store_size);
// If the store fails, our buffer is full and ready to hash. We have to
// either initialize the hash state (on the first full buffer) or mix
// this buffer into the existing hash state. Length tracks the *hashed*
// length, not the buffered length.
if (length == 0) {
state = hash_state::create(buffer, seed);
length = 64;
} else {
// Mix this chunk into the current state and bump length up by 64.
state.mix(buffer);
length += 64;
}
// Reset the buffer_ptr to the head of the buffer for the next chunk of
// data.
buffer_ptr = buffer;
// Try again to store into the buffer -- this cannot fail as we only
// store types smaller than the buffer.
if (!store_and_advance(buffer_ptr, buffer_end, data,
partial_store_size))
abort();
}
return buffer_ptr;
}
/// \brief Recursive, variadic combining method.
///
/// This function recurses through each argument, combining that argument
/// into a single hash.
template <typename T, typename ...Ts>
hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
const T &arg, const Ts &...args) {
buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
// Recurse to the next argument.
return combine(length, buffer_ptr, buffer_end, args...);
}
/// \brief Base case for recursive, variadic combining.
///
/// The base case when combining arguments recursively is reached when all
/// arguments have been handled. It flushes the remaining buffer and
/// constructs a hash_code.
hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
// Check whether the entire set of values fit in the buffer. If so, we'll
// use the optimized short hashing routine and skip state entirely.
if (length == 0)
return hash_short(buffer, buffer_ptr - buffer, seed);
// Mix the final buffer, rotating it if we did a partial fill in order to
// simulate doing a mix of the last 64 bytes. That is how the algorithm
// works when we have a contiguous byte sequence, and we want to emulate
// that here.
std::rotate(buffer, buffer_ptr, buffer_end);
// Mix this chunk into the current state.
state.mix(buffer);
length += buffer_ptr - buffer;
return state.finalize(length);
}
};
} // namespace detail
} // namespace hashing
/// \brief Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
///
/// The result is suitable for returning from a user's hash_value
/// *implementation* for their user-defined type. Consumers of a type should
/// *not* call this routine, they should instead call 'hash_value'.
template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
// Recursively hash each argument using a helper class.
::llvm::hashing::detail::hash_combine_recursive_helper helper;
return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
}
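// For example, a user-defined type can implement its hash_value overload in
// terms of hash_combine (a minimal sketch; Point is a hypothetical type):
//
//   namespace geom {
//   struct Point { int x, y; };
//   inline llvm::hash_code hash_value(const Point &P) {
//     return llvm::hash_combine(P.x, P.y);
//   }
//   } // namespace geom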
// Implementation details for implementations of hash_value overloads provided
// here.
namespace hashing {
namespace detail {
/// \brief Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,
/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
inline hash_code hash_integer_value(uint64_t value) {
// Similar to hash_4to8_bytes but using a seed instead of length.
const uint64_t seed = get_execution_seed();
const char *s = reinterpret_cast<const char *>(&value);
const uint64_t a = fetch32(s);
return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
}
} // namespace detail
} // namespace hashing
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value) {
return ::llvm::hashing::detail::hash_integer_value(
static_cast<uint64_t>(value));
}
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T> hash_code hash_value(const T *ptr) {
return ::llvm::hashing::detail::hash_integer_value(
reinterpret_cast<uintptr_t>(ptr));
}
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
return hash_combine(arg.first, arg.second);
}
// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
return hash_combine_range(arg.begin(), arg.end());
}
} // namespace llvm
#endif


@ -1,229 +0,0 @@
//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ImmutableList class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_IMMUTABLELIST_H
#define LLVM_ADT_IMMUTABLELIST_H
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
template <typename T> class ImmutableListFactory;
template <typename T>
class ImmutableListImpl : public FoldingSetNode {
T Head;
const ImmutableListImpl* Tail;
ImmutableListImpl(const T& head, const ImmutableListImpl* tail = nullptr)
: Head(head), Tail(tail) {}
friend class ImmutableListFactory<T>;
void operator=(const ImmutableListImpl&) = delete;
ImmutableListImpl(const ImmutableListImpl&) = delete;
public:
const T& getHead() const { return Head; }
const ImmutableListImpl* getTail() const { return Tail; }
static inline void Profile(FoldingSetNodeID& ID, const T& H,
const ImmutableListImpl* L){
ID.AddPointer(L);
ID.Add(H);
}
void Profile(FoldingSetNodeID& ID) {
Profile(ID, Head, Tail);
}
};
/// ImmutableList - This class represents an immutable (functional) list.
/// It is implemented as a smart pointer (wraps ImmutableListImpl), so it
/// is intended to always be copied by value as if it were a pointer.
/// This interface matches ImmutableSet and ImmutableMap. ImmutableList
/// objects should almost never be created directly, and instead should
/// be created by ImmutableListFactory objects that manage the lifetime
/// of a group of lists. When the factory object is reclaimed, all lists
/// created by that factory are released as well.
template <typename T>
class ImmutableList {
public:
typedef T value_type;
typedef ImmutableListFactory<T> Factory;
private:
const ImmutableListImpl<T>* X;
public:
// This constructor should normally only be called by ImmutableListFactory<T>.
// There may be cases, however, when one needs to extract the internal pointer
// and reconstruct a list object from that pointer.
ImmutableList(const ImmutableListImpl<T>* x = nullptr) : X(x) {}
const ImmutableListImpl<T>* getInternalPointer() const {
return X;
}
class iterator {
const ImmutableListImpl<T>* L;
public:
iterator() : L(nullptr) {}
iterator(ImmutableList l) : L(l.getInternalPointer()) {}
iterator& operator++() { L = L->getTail(); return *this; }
bool operator==(const iterator& I) const { return L == I.L; }
bool operator!=(const iterator& I) const { return L != I.L; }
const value_type& operator*() const { return L->getHead(); }
ImmutableList getList() const { return L; }
};
/// begin - Returns an iterator referring to the head of the list, or
/// an iterator denoting the end of the list if the list is empty.
iterator begin() const { return iterator(X); }
/// end - Returns an iterator denoting the end of the list. This iterator
/// does not refer to a valid list element.
iterator end() const { return iterator(); }
/// isEmpty - Returns true if the list is empty.
bool isEmpty() const { return !X; }
bool contains(const T& V) const {
for (iterator I = begin(), E = end(); I != E; ++I) {
if (*I == V)
return true;
}
return false;
}
/// isEqual - Returns true if two lists are equal. Because all lists created
/// from the same ImmutableListFactory are uniqued, this has O(1) complexity
/// because the contents of the list do not need to be compared. Note
/// that you should only compare two lists created from the same
/// ImmutableListFactory.
bool isEqual(const ImmutableList& L) const { return X == L.X; }
bool operator==(const ImmutableList& L) const { return isEqual(L); }
/// getHead - Returns the head of the list.
const T& getHead() {
assert (!isEmpty() && "Cannot get the head of an empty list.");
return X->getHead();
}
/// getTail - Returns the tail of the list, which is another (possibly empty)
/// ImmutableList.
ImmutableList getTail() {
return X ? X->getTail() : nullptr;
}
void Profile(FoldingSetNodeID& ID) const {
ID.AddPointer(X);
}
};
template <typename T>
class ImmutableListFactory {
typedef ImmutableListImpl<T> ListTy;
typedef FoldingSet<ListTy> CacheTy;
CacheTy Cache;
uintptr_t Allocator;
bool ownsAllocator() const {
return !(Allocator & 0x1);
}
BumpPtrAllocator& getAllocator() const {
return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
}
public:
ImmutableListFactory()
: Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
ImmutableListFactory(BumpPtrAllocator& Alloc)
: Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
~ImmutableListFactory() {
if (ownsAllocator()) delete &getAllocator();
}
ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) {
// Profile the new list to see if it already exists in our cache.
FoldingSetNodeID ID;
void* InsertPos;
const ListTy* TailImpl = Tail.getInternalPointer();
ListTy::Profile(ID, Head, TailImpl);
ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos);
if (!L) {
// The list does not exist in our cache. Create it.
BumpPtrAllocator& A = getAllocator();
L = (ListTy*) A.Allocate<ListTy>();
new (L) ListTy(Head, TailImpl);
// Insert the new list into the cache.
Cache.InsertNode(L, InsertPos);
}
return L;
}
ImmutableList<T> add(const T& D, ImmutableList<T> L) {
return concat(D, L);
}
ImmutableList<T> getEmptyList() const {
return ImmutableList<T>(nullptr);
}
ImmutableList<T> create(const T& X) {
return concat(X, getEmptyList());
}
};
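// For example, a minimal usage sketch:
//
//   ImmutableList<int>::Factory F;
//   ImmutableList<int> L = F.getEmptyList();
//   L = F.add(3, L); // [3]
//   L = F.add(2, L); // [2, 3]
//   int Sum = 0;
//   for (int V : L)
//     Sum += V; // visits 2, then 3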
//===----------------------------------------------------------------------===//
// Partially-specialized Traits.
//===----------------------------------------------------------------------===//
template<typename T> struct DenseMapInfo;
template<typename T> struct DenseMapInfo<ImmutableList<T> > {
static inline ImmutableList<T> getEmptyKey() {
return reinterpret_cast<ImmutableListImpl<T>*>(-1);
}
static inline ImmutableList<T> getTombstoneKey() {
return reinterpret_cast<ImmutableListImpl<T>*>(-2);
}
static unsigned getHashValue(ImmutableList<T> X) {
uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
return (unsigned((uintptr_t)PtrVal) >> 4) ^
(unsigned((uintptr_t)PtrVal) >> 9);
}
static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
return X1 == X2;
}
};
template <typename T> struct isPodLike;
template <typename T>
struct isPodLike<ImmutableList<T> > { static const bool value = true; };
} // end llvm namespace
#endif // LLVM_ADT_IMMUTABLELIST_H


@ -1,408 +0,0 @@
//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ImmutableMap class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_IMMUTABLEMAP_H
#define LLVM_ADT_IMMUTABLEMAP_H
#include "llvm/ADT/ImmutableSet.h"
namespace llvm {
/// ImutKeyValueInfo - Traits class used by ImmutableMap. While both the first
/// and second elements in a pair are used to generate profile information,
/// only the first element (the key) is used by isEqual and isLess.
template <typename T, typename S>
struct ImutKeyValueInfo {
typedef const std::pair<T,S> value_type;
typedef const value_type& value_type_ref;
typedef const T key_type;
typedef const T& key_type_ref;
typedef const S data_type;
typedef const S& data_type_ref;
static inline key_type_ref KeyOfValue(value_type_ref V) {
return V.first;
}
static inline data_type_ref DataOfValue(value_type_ref V) {
return V.second;
}
static inline bool isEqual(key_type_ref L, key_type_ref R) {
return ImutContainerInfo<T>::isEqual(L,R);
}
static inline bool isLess(key_type_ref L, key_type_ref R) {
return ImutContainerInfo<T>::isLess(L,R);
}
static inline bool isDataEqual(data_type_ref L, data_type_ref R) {
return ImutContainerInfo<S>::isEqual(L,R);
}
static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) {
ImutContainerInfo<T>::Profile(ID, V.first);
ImutContainerInfo<S>::Profile(ID, V.second);
}
};
template <typename KeyT, typename ValT,
typename ValInfo = ImutKeyValueInfo<KeyT,ValT> >
class ImmutableMap {
public:
typedef typename ValInfo::value_type value_type;
typedef typename ValInfo::value_type_ref value_type_ref;
typedef typename ValInfo::key_type key_type;
typedef typename ValInfo::key_type_ref key_type_ref;
typedef typename ValInfo::data_type data_type;
typedef typename ValInfo::data_type_ref data_type_ref;
typedef ImutAVLTree<ValInfo> TreeTy;
protected:
TreeTy* Root;
public:
/// Constructs a map from a pointer to a tree root. In general one
/// should use a Factory object to create maps instead of directly
/// invoking the constructor, but there are cases where making this
/// constructor public is useful.
explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {
if (Root) { Root->retain(); }
}
ImmutableMap(const ImmutableMap &X) : Root(X.Root) {
if (Root) { Root->retain(); }
}
ImmutableMap &operator=(const ImmutableMap &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
if (Root) { Root->release(); }
Root = X.Root;
}
return *this;
}
~ImmutableMap() {
if (Root) { Root->release(); }
}
class Factory {
typename TreeTy::Factory F;
const bool Canonicalize;
public:
Factory(bool canonicalize = true) : Canonicalize(canonicalize) {}
Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
: F(Alloc), Canonicalize(canonicalize) {}
ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D));
return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
TreeTy *T = F.remove(Old.Root,K);
return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
typename TreeTy::Factory *getTreeFactory() const {
return const_cast<typename TreeTy::Factory *>(&F);
}
private:
Factory(const Factory& RHS) = delete;
void operator=(const Factory& RHS) = delete;
};
bool contains(key_type_ref K) const {
return Root ? Root->contains(K) : false;
}
bool operator==(const ImmutableMap &RHS) const {
return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
}
bool operator!=(const ImmutableMap &RHS) const {
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
TreeTy *getRoot() const {
if (Root) { Root->retain(); }
return Root;
}
TreeTy *getRootWithoutRetain() const { return Root; }
void manualRetain() {
if (Root) Root->retain();
}
void manualRelease() {
if (Root) Root->release();
}
bool isEmpty() const { return !Root; }
//===--------------------------------------------------===//
// Foreach - A limited form of map iteration.
//===--------------------------------------------------===//
private:
template <typename Callback>
struct CBWrapper {
Callback C;
void operator()(value_type_ref V) { C(V.first,V.second); }
};
template <typename Callback>
struct CBWrapperRef {
Callback &C;
CBWrapperRef(Callback& c) : C(c) {}
void operator()(value_type_ref V) { C(V.first,V.second); }
};
public:
template <typename Callback>
void foreach(Callback& C) {
if (Root) {
CBWrapperRef<Callback> CB(C);
Root->foreach(CB);
}
}
template <typename Callback>
void foreach() {
if (Root) {
CBWrapper<Callback> CB;
Root->foreach(CB);
}
}
//===--------------------------------------------------===//
// For testing.
//===--------------------------------------------------===//
void verify() const { if (Root) Root->verify(); }
//===--------------------------------------------------===//
// Iterators.
//===--------------------------------------------------===//
class iterator : public ImutAVLValueIterator<ImmutableMap> {
iterator() = default;
explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
friend class ImmutableMap;
public:
key_type_ref getKey() const { return (*this)->first; }
data_type_ref getData() const { return (*this)->second; }
};
iterator begin() const { return iterator(Root); }
iterator end() const { return iterator(); }
data_type* lookup(key_type_ref K) const {
if (Root) {
TreeTy* T = Root->find(K);
if (T) return &T->getValue().second;
}
return nullptr;
}
/// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
/// which key is the highest in the ordering of keys in the map. This
/// method returns NULL if the map is empty.
value_type* getMaxElement() const {
return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
}
//===--------------------------------------------------===//
// Utility methods.
//===--------------------------------------------------===//
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
ID.AddPointer(M.Root);
}
inline void Profile(FoldingSetNodeID& ID) const {
return Profile(ID,*this);
}
};
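// For example, a minimal usage sketch:
//
//   ImmutableMap<int, int>::Factory F;
//   ImmutableMap<int, int> M = F.getEmptyMap();
//   M = F.add(M, 1, 10);
//   if (const int *V = M.lookup(1)) {
//     // *V == 10
//   }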
// NOTE: This will possibly become the new implementation of ImmutableMap some day.
template <typename KeyT, typename ValT,
typename ValInfo = ImutKeyValueInfo<KeyT,ValT> >
class ImmutableMapRef {
public:
typedef typename ValInfo::value_type value_type;
typedef typename ValInfo::value_type_ref value_type_ref;
typedef typename ValInfo::key_type key_type;
typedef typename ValInfo::key_type_ref key_type_ref;
typedef typename ValInfo::data_type data_type;
typedef typename ValInfo::data_type_ref data_type_ref;
typedef ImutAVLTree<ValInfo> TreeTy;
typedef typename TreeTy::Factory FactoryTy;
protected:
TreeTy *Root;
FactoryTy *Factory;
public:
/// Constructs a map from a pointer to a tree root. In general one
/// should use a Factory object to create maps instead of directly
/// invoking the constructor, but there are cases where making this
/// constructor public is useful.
explicit ImmutableMapRef(const TreeTy *R, FactoryTy *F)
: Root(const_cast<TreeTy *>(R)), Factory(F) {
if (Root) {
Root->retain();
}
}
explicit ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
typename ImmutableMap<KeyT, ValT>::Factory &F)
: Root(X.getRootWithoutRetain()),
Factory(F.getTreeFactory()) {
if (Root) { Root->retain(); }
}
ImmutableMapRef(const ImmutableMapRef &X) : Root(X.Root), Factory(X.Factory) {
if (Root) {
Root->retain();
}
}
ImmutableMapRef &operator=(const ImmutableMapRef &X) {
if (Root != X.Root) {
if (X.Root)
X.Root->retain();
if (Root)
Root->release();
Root = X.Root;
Factory = X.Factory;
}
return *this;
}
~ImmutableMapRef() {
if (Root)
Root->release();
}
static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
return ImmutableMapRef(nullptr, F);
}
void manualRetain() {
if (Root) Root->retain();
}
void manualRelease() {
if (Root) Root->release();
}
ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
TreeTy *NewT = Factory->add(Root, std::pair<key_type, data_type>(K, D));
return ImmutableMapRef(NewT, Factory);
}
ImmutableMapRef remove(key_type_ref K) const {
TreeTy *NewT = Factory->remove(Root, K);
return ImmutableMapRef(NewT, Factory);
}
bool contains(key_type_ref K) const {
return Root ? Root->contains(K) : false;
}
ImmutableMap<KeyT, ValT> asImmutableMap() const {
return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root));
}
bool operator==(const ImmutableMapRef &RHS) const {
return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
}
bool operator!=(const ImmutableMapRef &RHS) const {
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
bool isEmpty() const { return !Root; }
//===--------------------------------------------------===//
// For testing.
//===--------------------------------------------------===//
void verify() const {
if (Root)
Root->verify();
}
//===--------------------------------------------------===//
// Iterators.
//===--------------------------------------------------===//
class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
iterator() = default;
explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
friend class ImmutableMapRef;
public:
key_type_ref getKey() const { return (*this)->first; }
data_type_ref getData() const { return (*this)->second; }
};
iterator begin() const { return iterator(Root); }
iterator end() const { return iterator(); }
data_type *lookup(key_type_ref K) const {
if (Root) {
TreeTy* T = Root->find(K);
if (T) return &T->getValue().second;
}
return nullptr;
}
/// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
/// which key is the highest in the ordering of keys in the map. This
/// method returns NULL if the map is empty.
value_type* getMaxElement() const {
return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
}
//===--------------------------------------------------===//
// Utility methods.
//===--------------------------------------------------===//
unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
ID.AddPointer(M.Root);
}
inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
};
} // end namespace llvm
#endif // LLVM_ADT_IMMUTABLEMAP_H

File diff suppressed because it is too large


@ -1,85 +0,0 @@
//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an indexed map. The index map template takes two
// types. The first is the mapped type and the second is a functor
// that maps its argument to a size_t. On instantiation a "null" value
// can be provided to be used as a "does not exist" indicator in the
// map. A member function grow() is provided that, given the value of
// the maximally indexed key (the argument of the functor), makes sure
// the map has enough space for it.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_INDEXEDMAP_H
#define LLVM_ADT_INDEXEDMAP_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <functional>
namespace llvm {
template <typename T, typename ToIndexT = llvm::identity<unsigned> >
class IndexedMap {
typedef typename ToIndexT::argument_type IndexT;
// Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
// can grow very large and SmallVector grows more efficiently as long as T
// is trivially copyable.
typedef SmallVector<T, 0> StorageT;
StorageT storage_;
T nullVal_;
ToIndexT toIndex_;
public:
IndexedMap() : nullVal_(T()) { }
explicit IndexedMap(const T& val) : nullVal_(val) { }
typename StorageT::reference operator[](IndexT n) {
assert(toIndex_(n) < storage_.size() && "index out of bounds!");
return storage_[toIndex_(n)];
}
typename StorageT::const_reference operator[](IndexT n) const {
assert(toIndex_(n) < storage_.size() && "index out of bounds!");
return storage_[toIndex_(n)];
}
void reserve(typename StorageT::size_type s) {
storage_.reserve(s);
}
void resize(typename StorageT::size_type s) {
storage_.resize(s, nullVal_);
}
void clear() {
storage_.clear();
}
void grow(IndexT n) {
unsigned NewSize = toIndex_(n) + 1;
if (NewSize > storage_.size())
resize(NewSize);
}
bool inBounds(IndexT n) const {
return toIndex_(n) < storage_.size();
}
typename StorageT::size_type size() const {
return storage_.size();
}
};
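// For example, a minimal usage sketch (mapping small integer keys to names;
// the values are arbitrary):
//
//   IndexedMap<const char *> Names("<none>");
//   Names.grow(41);        // ensure indices 0 .. 41 are valid
//   Names[41] = "answer";
//   bool Known = Names.inBounds(7); // true; Names[7] == "<none>"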
} // End llvm namespace
#endif


@ -1,88 +0,0 @@
//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Equivalence classes for small integers. This is a mapping of the integers
// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
//
// Initially each integer has its own equivalence class. Classes are joined by
// passing a representative member of each class to join().
//
// Once the classes are built, compress() will number them 0 .. M-1 and prevent
// further changes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_INTEQCLASSES_H
#define LLVM_ADT_INTEQCLASSES_H
#include "llvm/ADT/SmallVector.h"
namespace llvm {
class IntEqClasses {
/// EC - When uncompressed, map each integer to a smaller member of its
/// equivalence class. The class leader is the smallest member and maps to
/// itself.
///
/// When compressed, EC[i] is the equivalence class of i.
SmallVector<unsigned, 8> EC;
/// NumClasses - The number of equivalence classes when compressed, or 0 when
/// uncompressed.
unsigned NumClasses;
public:
/// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); }
/// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
/// equivalence classes.
/// This requires an uncompressed map.
void grow(unsigned N);
/// clear - Clear all classes so that grow() will assign a unique class to
/// every integer.
void clear() {
EC.clear();
NumClasses = 0;
}
/// Join the equivalence classes of a and b. After joining classes,
/// findLeader(a) == findLeader(b). This requires an uncompressed map.
/// Returns the new leader.
unsigned join(unsigned a, unsigned b);
/// findLeader - Compute the leader of a's equivalence class. This is the
/// smallest member of the class.
/// This requires an uncompressed map.
unsigned findLeader(unsigned a) const;
/// compress - Compress equivalence classes by numbering them 0 .. M-1.
/// This makes the equivalence class map immutable.
void compress();
/// getNumClasses - Return the number of equivalence classes after compress()
/// was called.
unsigned getNumClasses() const { return NumClasses; }
/// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
/// This requires a compressed map.
unsigned operator[](unsigned a) const {
assert(NumClasses && "operator[] called before compress()");
return EC[a];
}
/// uncompress - Change back to the uncompressed representation that allows
/// editing.
void uncompress();
};
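// For example, a minimal usage sketch:
//
//   IntEqClasses EC(4); // classes: {0} {1} {2} {3}
//   EC.join(0, 2);      // classes: {0, 2} {1} {3}
//   EC.join(2, 3);      // classes: {0, 2, 3} {1}
//   EC.compress();      // renumber classes 0 .. 1
//   unsigned C = EC[0]; // same class number as EC[2] and EC[3]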
} // End llvm namespace
#endif

File diff suppressed because it is too large


@ -1,288 +0,0 @@
//== llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines IntrusiveRefCntPtr, a template class that
// implements a "smart" pointer for objects that maintain their own
// internal reference count, and RefCountedBase/RefCountedBaseVPTR, two
// generic base classes for objects that wish to have their lifetimes
// managed using reference counting.
//
// IntrusiveRefCntPtr is similar to Boost's intrusive_ptr with added
// LLVM-style casting.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_INTRUSIVEREFCNTPTR_H
#define LLVM_ADT_INTRUSIVEREFCNTPTR_H
#include <atomic>
#include <cassert>
#include <cstddef>
namespace llvm {
template <class T>
class IntrusiveRefCntPtr;
//===----------------------------------------------------------------------===//
/// RefCountedBase - A generic base class for objects that wish to
/// have their lifetimes managed using reference counts. Classes
/// subclass RefCountedBase to obtain such functionality, and are
/// typically handled with IntrusiveRefCntPtr "smart pointers" (see below)
/// which automatically handle the management of reference counts.
/// Objects that subclass RefCountedBase should not be allocated on
/// the stack, as invoking "delete" (which is called when the
/// reference count hits 0) on such objects is an error.
//===----------------------------------------------------------------------===//
template <class Derived>
class RefCountedBase {
mutable unsigned ref_cnt;
public:
RefCountedBase() : ref_cnt(0) {}
RefCountedBase(const RefCountedBase &) : ref_cnt(0) {}
void Retain() const { ++ref_cnt; }
void Release() const {
assert (ref_cnt > 0 && "Reference count is already zero.");
if (--ref_cnt == 0) delete static_cast<const Derived*>(this);
}
};
//===----------------------------------------------------------------------===//
/// RefCountedBaseVPTR - A class that has the same function as
/// RefCountedBase, but with a virtual destructor. Should be used
/// instead of RefCountedBase for classes that already have virtual
/// methods to enforce dynamic allocation via 'new'. Classes that
/// inherit from RefCountedBaseVPTR can't be allocated on the stack;
/// attempting to do this will produce a compile error.
//===----------------------------------------------------------------------===//
class RefCountedBaseVPTR {
mutable unsigned ref_cnt;
virtual void anchor();
protected:
RefCountedBaseVPTR() : ref_cnt(0) {}
RefCountedBaseVPTR(const RefCountedBaseVPTR &) : ref_cnt(0) {}
virtual ~RefCountedBaseVPTR() {}
void Retain() const { ++ref_cnt; }
void Release() const {
assert (ref_cnt > 0 && "Reference count is already zero.");
if (--ref_cnt == 0) delete this;
}
template <typename T>
friend struct IntrusiveRefCntPtrInfo;
};
template <typename T> struct IntrusiveRefCntPtrInfo {
static void retain(T *obj) { obj->Retain(); }
static void release(T *obj) { obj->Release(); }
};
/// \brief A thread-safe version of \c llvm::RefCountedBase.
///
/// A generic base class for objects that wish to have their lifetimes managed
/// using reference counts. Classes subclass \c ThreadSafeRefCountedBase to
/// obtain such functionality, and are typically handled with
/// \c IntrusiveRefCntPtr "smart pointers" which automatically handle the
/// management of reference counts.
template <class Derived>
class ThreadSafeRefCountedBase {
mutable std::atomic<int> RefCount;
protected:
ThreadSafeRefCountedBase() : RefCount(0) {}
public:
void Retain() const { ++RefCount; }
void Release() const {
int NewRefCount = --RefCount;
assert(NewRefCount >= 0 && "Reference count was already zero.");
if (NewRefCount == 0)
delete static_cast<const Derived*>(this);
}
};
//===----------------------------------------------------------------------===//
/// IntrusiveRefCntPtr - A template class that implements a "smart pointer"
/// that assumes the wrapped object has a reference count associated
/// with it that can be managed via calls to
/// IntrusivePtrAddRef/IntrusivePtrRelease. The smart pointers
/// manage reference counts via the RAII idiom: upon creation of
/// smart pointer the reference count of the wrapped object is
/// incremented and upon destruction of the smart pointer the
/// reference count is decremented. This class also safely handles
/// wrapping NULL pointers.
///
/// Reference counting is implemented via calls to
/// Obj->Retain()/Obj->Release(). Release() is required to destroy
/// the object when the reference count reaches zero. Inheriting from
/// RefCountedBase/RefCountedBaseVPTR takes care of this
/// automatically.
//===----------------------------------------------------------------------===//
template <typename T>
class IntrusiveRefCntPtr {
T* Obj;
public:
typedef T element_type;
explicit IntrusiveRefCntPtr() : Obj(nullptr) {}
IntrusiveRefCntPtr(T* obj) : Obj(obj) {
retain();
}
IntrusiveRefCntPtr(const IntrusiveRefCntPtr& S) : Obj(S.Obj) {
retain();
}
IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
S.Obj = nullptr;
}
template <class X>
IntrusiveRefCntPtr(IntrusiveRefCntPtr<X>&& S) : Obj(S.get()) {
S.Obj = nullptr;
}
template <class X>
IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
: Obj(S.get()) {
retain();
}
IntrusiveRefCntPtr& operator=(IntrusiveRefCntPtr S) {
swap(S);
return *this;
}
~IntrusiveRefCntPtr() { release(); }
T& operator*() const { return *Obj; }
T* operator->() const { return Obj; }
T* get() const { return Obj; }
explicit operator bool() const { return Obj; }
void swap(IntrusiveRefCntPtr& other) {
T* tmp = other.Obj;
other.Obj = Obj;
Obj = tmp;
}
void reset() {
release();
Obj = nullptr;
}
void resetWithoutRelease() {
Obj = nullptr;
}
private:
void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }
template <typename X>
friend class IntrusiveRefCntPtr;
};
template<class T, class U>
inline bool operator==(const IntrusiveRefCntPtr<T>& A,
const IntrusiveRefCntPtr<U>& B)
{
return A.get() == B.get();
}
template<class T, class U>
inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
const IntrusiveRefCntPtr<U>& B)
{
return A.get() != B.get();
}
template<class T, class U>
inline bool operator==(const IntrusiveRefCntPtr<T>& A,
U* B)
{
return A.get() == B;
}
template<class T, class U>
inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
U* B)
{
return A.get() != B;
}
template<class T, class U>
inline bool operator==(T* A,
const IntrusiveRefCntPtr<U>& B)
{
return A == B.get();
}
template<class T, class U>
inline bool operator!=(T* A,
const IntrusiveRefCntPtr<U>& B)
{
return A != B.get();
}
template <class T>
bool operator==(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
return !B;
}
template <class T>
bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
return B == A;
}
template <class T>
bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
return !(A == B);
}
template <class T>
bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
return !(A == B);
}
//===----------------------------------------------------------------------===//
// LLVM-style downcasting support for IntrusiveRefCntPtr objects
//===----------------------------------------------------------------------===//
template <typename From> struct simplify_type;
template<class T> struct simplify_type<IntrusiveRefCntPtr<T> > {
typedef T* SimpleType;
static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T>& Val) {
return Val.get();
}
};
template<class T> struct simplify_type<const IntrusiveRefCntPtr<T> > {
typedef /*const*/ T* SimpleType;
static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T>& Val) {
return Val.get();
}
};
} // end namespace llvm
#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
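// Illustrative usage sketch (added for exposition; not part of the original
// header). `Document` and `exampleRefCnt` are hypothetical names. A class
// derives from RefCountedBase<Derived> and is then handled exclusively
// through IntrusiveRefCntPtr, which does the Retain()/Release() calls.
class Document : public llvm::RefCountedBase<Document> {};

void exampleRefCnt() {
  llvm::IntrusiveRefCntPtr<Document> A(new Document()); // ref count: 1
  llvm::IntrusiveRefCntPtr<Document> B = A;             // ref count: 2
  A.reset();                                            // ref count: 1
} // B is destroyed here; the count reaches 0 and the Document is deleted.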

View File

@ -1,200 +0,0 @@
//===- llvm/ADT/MapVector.h - Map w/ deterministic value order --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a map that provides insertion order iteration. The
// interface is purposefully minimal. The key is assumed to be cheap to copy
// and 2 copies are kept, one for indexing in a DenseMap, one for iteration in
// a std::vector.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_MAPVECTOR_H
#define LLVM_ADT_MAPVECTOR_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>
namespace llvm {
/// This class implements a map that also provides access to all stored values
/// in a deterministic order. The values are kept in a std::vector and the
/// mapping is done with DenseMap from Keys to indexes in that vector.
template<typename KeyT, typename ValueT,
typename MapType = llvm::DenseMap<KeyT, unsigned>,
typename VectorType = std::vector<std::pair<KeyT, ValueT> > >
class MapVector {
typedef typename VectorType::size_type size_type;
MapType Map;
VectorType Vector;
public:
typedef typename VectorType::iterator iterator;
typedef typename VectorType::const_iterator const_iterator;
typedef typename VectorType::reverse_iterator reverse_iterator;
typedef typename VectorType::const_reverse_iterator const_reverse_iterator;
size_type size() const { return Vector.size(); }
iterator begin() { return Vector.begin(); }
const_iterator begin() const { return Vector.begin(); }
iterator end() { return Vector.end(); }
const_iterator end() const { return Vector.end(); }
reverse_iterator rbegin() { return Vector.rbegin(); }
const_reverse_iterator rbegin() const { return Vector.rbegin(); }
reverse_iterator rend() { return Vector.rend(); }
const_reverse_iterator rend() const { return Vector.rend(); }
bool empty() const {
return Vector.empty();
}
std::pair<KeyT, ValueT> &front() { return Vector.front(); }
const std::pair<KeyT, ValueT> &front() const { return Vector.front(); }
std::pair<KeyT, ValueT> &back() { return Vector.back(); }
const std::pair<KeyT, ValueT> &back() const { return Vector.back(); }
void clear() {
Map.clear();
Vector.clear();
}
void swap(MapVector &RHS) {
std::swap(Map, RHS.Map);
std::swap(Vector, RHS.Vector);
}
ValueT &operator[](const KeyT &Key) {
std::pair<KeyT, unsigned> Pair = std::make_pair(Key, 0);
std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
unsigned &I = Result.first->second;
if (Result.second) {
Vector.push_back(std::make_pair(Key, ValueT()));
I = Vector.size() - 1;
}
return Vector[I].second;
}
ValueT lookup(const KeyT &Key) const {
typename MapType::const_iterator Pos = Map.find(Key);
return Pos == Map.end()? ValueT() : Vector[Pos->second].second;
}
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
unsigned &I = Result.first->second;
if (Result.second) {
Vector.push_back(std::make_pair(KV.first, KV.second));
I = Vector.size() - 1;
return std::make_pair(std::prev(end()), true);
}
return std::make_pair(begin() + I, false);
}
size_type count(const KeyT &Key) const {
typename MapType::const_iterator Pos = Map.find(Key);
return Pos == Map.end()? 0 : 1;
}
iterator find(const KeyT &Key) {
typename MapType::const_iterator Pos = Map.find(Key);
return Pos == Map.end()? Vector.end() :
(Vector.begin() + Pos->second);
}
const_iterator find(const KeyT &Key) const {
typename MapType::const_iterator Pos = Map.find(Key);
return Pos == Map.end()? Vector.end() :
(Vector.begin() + Pos->second);
}
/// \brief Remove the last element from the vector.
void pop_back() {
typename MapType::iterator Pos = Map.find(Vector.back().first);
Map.erase(Pos);
Vector.pop_back();
}
/// \brief Remove the element given by Iterator.
///
/// Returns an iterator to the element following the one which was removed,
/// which may be end().
///
/// \note This is a deceivingly expensive operation (linear time). It's
/// usually better to use \a remove_if() if possible.
typename VectorType::iterator erase(typename VectorType::iterator Iterator) {
Map.erase(Iterator->first);
auto Next = Vector.erase(Iterator);
if (Next == Vector.end())
return Next;
// Update indices in the map.
size_t Index = Next - Vector.begin();
for (auto &I : Map) {
assert(I.second != Index && "Index was already erased!");
if (I.second > Index)
--I.second;
}
return Next;
}
/// \brief Remove all elements with the key value Key.
///
/// Returns the number of elements removed.
size_type erase(const KeyT &Key) {
auto Iterator = find(Key);
if (Iterator == end())
return 0;
erase(Iterator);
return 1;
}
/// \brief Remove the elements that match the predicate.
///
/// Erase all elements that match \c Pred in a single pass. Takes linear
/// time.
template <class Predicate> void remove_if(Predicate Pred);
};
template <typename KeyT, typename ValueT, typename MapType, typename VectorType>
template <class Function>
void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
auto O = Vector.begin();
for (auto I = O, E = Vector.end(); I != E; ++I) {
if (Pred(*I)) {
// Erase from the map.
Map.erase(I->first);
continue;
}
if (I != O) {
// Move the value and update the index in the map.
*O = std::move(*I);
Map[O->first] = O - Vector.begin();
}
++O;
}
// Erase trailing entries in the vector.
Vector.erase(O, Vector.end());
}
/// \brief A MapVector that performs no allocations if smaller than a certain
/// size.
template <typename KeyT, typename ValueT, unsigned N>
struct SmallMapVector
: MapVector<KeyT, ValueT, SmallDenseMap<KeyT, unsigned, N>,
SmallVector<std::pair<KeyT, ValueT>, N>> {
};
} // end namespace llvm
#endif
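// Illustrative usage sketch (added for exposition; not part of the original
// header). The function name is hypothetical. Lookup goes through the
// DenseMap, while iteration follows the std::vector, i.e. insertion order.
#include <cstdio>

void exampleMapVector() {
  llvm::MapVector<int, const char *> MV;
  MV.insert(std::make_pair(3, "three"));
  MV[1] = "one"; // operator[] default-constructs the value, then we assign
  MV[2] = "two";
  // Visits entries as 3, 1, 2 -- insertion order, not hash order.
  for (const auto &KV : MV)
    std::printf("%d -> %s\n", KV.first, KV.second);
}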

View File

@ -1,26 +0,0 @@
//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides None, an enumerator for use in implicit constructors
// of various (usually templated) types to make such construction more
// terse.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_NONE_H
#define LLVM_ADT_NONE_H
namespace llvm {
/// \brief A simple null object to allow implicit construction of Optional<T>
/// and similar types without having to spell out the specialization's name.
enum class NoneType { None };
const NoneType None = NoneType::None;
}
#endif

View File

@ -1,228 +0,0 @@
//===-- Optional.h - Simple variant for passing optional values ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides Optional, a template class modeled in the spirit of
// OCaml's 'opt' variant. The idea is to strongly type whether or not
// a value can be optional.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_OPTIONAL_H
#define LLVM_ADT_OPTIONAL_H
#include "llvm/ADT/None.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <new>
#include <utility>
namespace llvm {
template<typename T>
class Optional {
AlignedCharArrayUnion<T> storage;
bool hasVal;
public:
typedef T value_type;
Optional(NoneType) : hasVal(false) {}
explicit Optional() : hasVal(false) {}
Optional(const T &y) : hasVal(true) {
new (storage.buffer) T(y);
}
Optional(const Optional &O) : hasVal(O.hasVal) {
if (hasVal)
new (storage.buffer) T(*O);
}
Optional(T &&y) : hasVal(true) {
new (storage.buffer) T(std::forward<T>(y));
}
Optional(Optional<T> &&O) : hasVal(O.hasVal) {
if (O) {
new (storage.buffer) T(std::move(*O));
O.reset();
}
}
Optional &operator=(T &&y) {
if (hasVal)
**this = std::move(y);
else {
new (storage.buffer) T(std::move(y));
hasVal = true;
}
return *this;
}
Optional &operator=(Optional &&O) {
if (!O)
reset();
else {
*this = std::move(*O);
O.reset();
}
return *this;
}
/// Create a new object by constructing it in place with the given arguments.
template<typename ...ArgTypes>
void emplace(ArgTypes &&...Args) {
reset();
hasVal = true;
new (storage.buffer) T(std::forward<ArgTypes>(Args)...);
}
static inline Optional create(const T* y) {
return y ? Optional(*y) : Optional();
}
// FIXME: these assignments (& the equivalent const T&/const Optional& ctors)
// could be made more efficient by passing by value, possibly unifying them
// with the rvalue versions above - but this could place a different set of
// requirements (notably: the existence of a default ctor) when implemented
// in that way. Careful SFINAE to avoid such pitfalls would be required.
Optional &operator=(const T &y) {
if (hasVal)
**this = y;
else {
new (storage.buffer) T(y);
hasVal = true;
}
return *this;
}
Optional &operator=(const Optional &O) {
if (!O)
reset();
else
*this = *O;
return *this;
}
void reset() {
if (hasVal) {
(**this).~T();
hasVal = false;
}
}
~Optional() {
reset();
}
const T* getPointer() const { assert(hasVal); return reinterpret_cast<const T*>(storage.buffer); }
T* getPointer() { assert(hasVal); return reinterpret_cast<T*>(storage.buffer); }
const T& getValue() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
T& getValue() LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
explicit operator bool() const { return hasVal; }
bool hasValue() const { return hasVal; }
const T* operator->() const { return getPointer(); }
T* operator->() { return getPointer(); }
const T& operator*() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
T& operator*() LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
template <typename U>
LLVM_CONSTEXPR T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
return hasValue() ? getValue() : std::forward<U>(value);
}
#if LLVM_HAS_RVALUE_REFERENCE_THIS
T&& getValue() && { assert(hasVal); return std::move(*getPointer()); }
T&& operator*() && { assert(hasVal); return std::move(*getPointer()); }
template <typename U>
T getValueOr(U &&value) && {
return hasValue() ? std::move(getValue()) : std::forward<U>(value);
}
#endif
};
template <typename T> struct isPodLike;
template <typename T> struct isPodLike<Optional<T> > {
// An Optional<T> is pod-like if T is.
static const bool value = isPodLike<T>::value;
};
/// \brief Poison comparison between two \c Optional objects. Clients need to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator==(const Optional<T> &X, const Optional<U> &Y);
template<typename T>
bool operator==(const Optional<T> &X, NoneType) {
return !X.hasValue();
}
template<typename T>
bool operator==(NoneType, const Optional<T> &X) {
return X == None;
}
template<typename T>
bool operator!=(const Optional<T> &X, NoneType) {
return !(X == None);
}
template<typename T>
bool operator!=(NoneType, const Optional<T> &X) {
return X != None;
}
/// \brief Poison comparison between two \c Optional objects. Clients need to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator!=(const Optional<T> &X, const Optional<U> &Y);
/// \brief Poison comparison between two \c Optional objects. Clients need to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator<(const Optional<T> &X, const Optional<U> &Y);
/// \brief Poison comparison between two \c Optional objects. Clients need to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator<=(const Optional<T> &X, const Optional<U> &Y);
/// \brief Poison comparison between two \c Optional objects. Clients need to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator>=(const Optional<T> &X, const Optional<U> &Y);
/// \brief Poison comparison between two \c Optional objects. Clients need to
/// explicitly compare the underlying values and account for empty \c Optional
/// objects.
///
/// This routine will never be defined. It returns \c void to help diagnose
/// errors at compile time.
template<typename T, typename U>
void operator>(const Optional<T> &X, const Optional<U> &Y);
} // end llvm namespace
#endif
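// Illustrative usage sketch (added for exposition; not part of the original
// header). `parsePositive` and `exampleOptional` are hypothetical names.
llvm::Optional<int> parsePositive(int N) {
  if (N > 0)
    return N;        // wraps the value
  return llvm::None; // empty, via the NoneType enumerator
}

void exampleOptional() {
  llvm::Optional<int> V = parsePositive(7);
  if (V)   // explicit operator bool: true when a value is present
    ++*V;  // operator* asserts if the Optional is empty
  int Fallback = parsePositive(-1).getValueOr(0); // yields 0
  (void)Fallback;
}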

View File

@ -1,149 +0,0 @@
//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PackedVector class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_PACKEDVECTOR_H
#define LLVM_ADT_PACKEDVECTOR_H
#include "llvm/ADT/BitVector.h"
#include <limits>
namespace llvm {
template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned>
class PackedVectorBase;
// This won't be necessary if we can specialize members without specializing
// the parent template.
template <typename T, unsigned BitNum, typename BitVectorTy>
class PackedVectorBase<T, BitNum, BitVectorTy, false> {
protected:
static T getValue(const BitVectorTy &Bits, unsigned Idx) {
T val = T();
for (unsigned i = 0; i != BitNum; ++i)
val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
return val;
}
static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
assert((val >> BitNum) == 0 && "value is too big");
for (unsigned i = 0; i != BitNum; ++i)
Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
}
};
template <typename T, unsigned BitNum, typename BitVectorTy>
class PackedVectorBase<T, BitNum, BitVectorTy, true> {
protected:
static T getValue(const BitVectorTy &Bits, unsigned Idx) {
T val = T();
for (unsigned i = 0; i != BitNum-1; ++i)
val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
if (Bits[(Idx << (BitNum-1)) + BitNum-1])
val = ~val;
return val;
}
static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
if (val < 0) {
val = ~val;
Bits.set((Idx << (BitNum-1)) + BitNum-1);
}
assert((val >> (BitNum-1)) == 0 && "value is too big");
for (unsigned i = 0; i != BitNum-1; ++i)
Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
}
};
/// \brief Store a vector of values using a specific number of bits for each
/// value. Both signed and unsigned types can be used, e.g
/// @code
/// PackedVector<signed, 2> vec;
/// @endcode
/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
/// an assertion.
template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
std::numeric_limits<T>::is_signed> {
BitVectorTy Bits;
typedef PackedVectorBase<T, BitNum, BitVectorTy,
std::numeric_limits<T>::is_signed> base;
public:
class reference {
PackedVector &Vec;
const unsigned Idx;
reference(); // Undefined
public:
reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) {}
reference &operator=(T val) {
Vec.setValue(Vec.Bits, Idx, val);
return *this;
}
operator T() const {
return Vec.getValue(Vec.Bits, Idx);
}
};
PackedVector() = default;
explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) { }
bool empty() const { return Bits.empty(); }
unsigned size() const { return Bits.size() >> (BitNum - 1); }
void clear() { Bits.clear(); }
void resize(unsigned N) { Bits.resize(N << (BitNum - 1)); }
void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); }
PackedVector &reset() {
Bits.reset();
return *this;
}
void push_back(T val) {
resize(size()+1);
(*this)[size()-1] = val;
}
reference operator[](unsigned Idx) {
return reference(*this, Idx);
}
T operator[](unsigned Idx) const {
return base::getValue(Bits, Idx);
}
bool operator==(const PackedVector &RHS) const {
return Bits == RHS.Bits;
}
bool operator!=(const PackedVector &RHS) const {
return Bits != RHS.Bits;
}
PackedVector &operator|=(const PackedVector &RHS) {
Bits |= RHS.Bits;
return *this;
}
};
// Leave BitNum=0 undefined.
template <typename T> class PackedVector<T, 0>;
} // end llvm namespace
#endif
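// Illustrative usage sketch (added for exposition; not part of the original
// header). The function name is hypothetical. Each element occupies its own
// slice of bits inside a single BitVector.
void examplePackedVector() {
  llvm::PackedVector<unsigned, 2> Vec; // unsigned values 0..3 fit in 2 bits
  Vec.push_back(2);
  Vec.push_back(3);
  Vec[0] = 1;              // writes go through the proxy `reference` class
  unsigned First = Vec[0]; // reads decode the packed bits (here: 1)
  (void)First;
}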

View File

@ -1,117 +0,0 @@
//===- llvm/ADT/PointerEmbeddedInt.h ----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_POINTEREMBEDDEDINT_H
#define LLVM_ADT_POINTEREMBEDDEDINT_H
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <climits>
namespace llvm {
/// Utility to embed an integer into a pointer-like type. This is specifically
/// intended to allow embedding integers where fewer bits are required than
/// exist in a pointer, and the integer can participate in abstractions along
/// side other pointer-like types. For example it can be placed into a \c
/// PointerSumType or \c PointerUnion.
///
/// Note that much like pointers, an integer value of zero has special utility
/// due to boolean conversions. For example, a non-null value can be tested for
/// in the above abstractions without testing the particular active member.
/// Also, the default constructed value zero initializes the integer.
template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
class PointerEmbeddedInt {
uintptr_t Value;
// Note: This '<' is correct; using '<=' would result in some shifts
// overflowing their storage types.
static_assert(Bits < sizeof(uintptr_t) * CHAR_BIT,
"Cannot embed more bits than we have in a pointer!");
enum : uintptr_t {
// We shift as many zeros into the value as we can while preserving the
// number of bits desired for the integer.
Shift = sizeof(uintptr_t) * CHAR_BIT - Bits,
// We also want to be able to mask out the preserved bits for asserts.
Mask = static_cast<uintptr_t>(-1) << Bits
};
struct RawValueTag {
explicit RawValueTag() = default;
};
friend class PointerLikeTypeTraits<PointerEmbeddedInt>;
explicit PointerEmbeddedInt(uintptr_t Value, RawValueTag) : Value(Value) {}
public:
PointerEmbeddedInt() : Value(0) {}
PointerEmbeddedInt(IntT I) {
*this = I;
}
PointerEmbeddedInt &operator=(IntT I) {
assert((std::is_signed<IntT>::value ? llvm::isInt<Bits>(I)
: llvm::isUInt<Bits>(I)) &&
"Integer has bits outside those preserved!");
Value = static_cast<uintptr_t>(I) << Shift;
return *this;
}
// Note that this implicit conversion additionally allows all of the basic
// comparison operators to work transparently, etc.
operator IntT() const {
if (std::is_signed<IntT>::value)
return static_cast<IntT>(static_cast<intptr_t>(Value) >> Shift);
return static_cast<IntT>(Value >> Shift);
}
};
// Provide pointer like traits to support use with pointer unions and sum
// types.
template <typename IntT, int Bits>
class PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
typedef PointerEmbeddedInt<IntT, Bits> T;
public:
static inline void *getAsVoidPointer(const T &P) {
return reinterpret_cast<void *>(P.Value);
}
static inline T getFromVoidPointer(void *P) {
return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
}
static inline T getFromVoidPointer(const void *P) {
return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
}
enum { NumLowBitsAvailable = T::Shift };
};
// Teach DenseMap how to use PointerEmbeddedInt objects as keys if the Int type
// itself can be a key.
template <typename IntT, int Bits>
struct DenseMapInfo<PointerEmbeddedInt<IntT, Bits>> {
typedef PointerEmbeddedInt<IntT, Bits> T;
typedef DenseMapInfo<IntT> IntInfo;
static inline T getEmptyKey() { return IntInfo::getEmptyKey(); }
static inline T getTombstoneKey() { return IntInfo::getTombstoneKey(); }
static unsigned getHashValue(const T &Arg) {
return IntInfo::getHashValue(Arg);
}
static bool isEqual(const T &LHS, const T &RHS) { return LHS == RHS; }
};
}
#endif
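// Illustrative usage sketch (added for exposition; not part of the original
// header). The function name is hypothetical. The integer is stored in the
// high bits of a pointer-sized word, so the value can participate in
// PointerUnion/PointerSumType style abstractions.
void examplePointerEmbeddedInt() {
  llvm::PointerEmbeddedInt<int, 8> E = 42; // 42 fits in the preserved 8 bits
  int V = E; // the implicit conversion recovers the integer
  (void)V;
  // E = 1000; // would fire the assert: bits outside the preserved 8
}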

View File

@ -1,223 +0,0 @@
//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PointerIntPair class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_POINTERINTPAIR_H
#define LLVM_ADT_POINTERINTPAIR_H
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <limits>
namespace llvm {
template <typename T> struct DenseMapInfo;
template <typename PointerT, unsigned IntBits, typename PtrTraits>
struct PointerIntPairInfo;
/// PointerIntPair - This class implements a pair of a pointer and small
/// integer. It is designed to represent this in the space required by one
/// pointer by bitmangling the integer into the low part of the pointer. This
/// can only be done for small integers: typically up to 3 bits, but it depends
/// on the number of bits available according to PointerLikeTypeTraits for the
/// type.
///
/// Note that PointerIntPair always puts the IntVal part in the highest bits
/// possible. For example, PointerIntPair<void*, 1, bool> will put the bit for
/// the bool into bit #2, not bit #0, which allows the low two bits to be used
/// for something else. For example, this allows:
/// PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
/// ... and the two bools will land in different bits.
///
template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
class PointerIntPair {
intptr_t Value;
public:
PointerIntPair() : Value(0) {}
PointerIntPair(PointerTy PtrVal, IntType IntVal) {
setPointerAndInt(PtrVal, IntVal);
}
explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
PointerTy getPointer() const { return Info::getPointer(Value); }
IntType getInt() const {
return (IntType)Info::getInt(Value);
}
void setPointer(PointerTy PtrVal) {
Value = Info::updatePointer(Value, PtrVal);
}
void setInt(IntType IntVal) {
Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
}
void initWithPointer(PointerTy PtrVal) {
Value = Info::updatePointer(0, PtrVal);
}
void setPointerAndInt(PointerTy PtrVal, IntType IntVal) {
Value = Info::updateInt(Info::updatePointer(0, PtrVal),
static_cast<intptr_t>(IntVal));
}
PointerTy const *getAddrOfPointer() const {
return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
}
PointerTy *getAddrOfPointer() {
assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
"Can only return the address if IntBits is cleared and "
"PtrTraits doesn't change the pointer");
return reinterpret_cast<PointerTy *>(&Value);
}
void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
void setFromOpaqueValue(void *Val) {
Value = reinterpret_cast<intptr_t>(Val);
}
static PointerIntPair getFromOpaqueValue(void *V) {
PointerIntPair P;
P.setFromOpaqueValue(V);
return P;
}
// Allow PointerIntPairs to be created from const void * if and only if the
// pointer type could be created from a const void *.
static PointerIntPair getFromOpaqueValue(const void *V) {
(void)PtrTraits::getFromVoidPointer(V);
return getFromOpaqueValue(const_cast<void *>(V));
}
bool operator==(const PointerIntPair &RHS) const {
return Value == RHS.Value;
}
bool operator!=(const PointerIntPair &RHS) const {
return Value != RHS.Value;
}
bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
bool operator<=(const PointerIntPair &RHS) const {
return Value <= RHS.Value;
}
bool operator>=(const PointerIntPair &RHS) const {
return Value >= RHS.Value;
}
};
template <typename PointerT, unsigned IntBits, typename PtrTraits>
struct PointerIntPairInfo {
static_assert(PtrTraits::NumLowBitsAvailable <
std::numeric_limits<uintptr_t>::digits,
"cannot use a pointer type that has all bits free");
static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
"PointerIntPair with integer size too large for pointer");
enum : uintptr_t {
/// PointerBitMask - The bits that come from the pointer.
PointerBitMask =
~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
/// IntShift - The number of low bits that we reserve for other uses, and
/// keep zero.
IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
/// IntMask - This is the unshifted mask for valid bits of the int type.
IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
// ShiftedIntMask - This is the bits for the integer shifted in place.
ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
};
static PointerT getPointer(intptr_t Value) {
return PtrTraits::getFromVoidPointer(
reinterpret_cast<void *>(Value & PointerBitMask));
}
static intptr_t getInt(intptr_t Value) {
return (Value >> IntShift) & IntMask;
}
static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
intptr_t PtrWord =
reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
assert((PtrWord & ~PointerBitMask) == 0 &&
"Pointer is not sufficiently aligned");
// Preserve all low bits, just update the pointer.
return PtrWord | (OrigValue & ~PointerBitMask);
}
static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
intptr_t IntWord = static_cast<intptr_t>(Int);
assert((IntWord & ~IntMask) == 0 && "Integer too large for field");
// Preserve all bits other than the ones we are updating.
return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
}
};
template <typename T> struct isPodLike;
template <typename PointerTy, unsigned IntBits, typename IntType>
struct isPodLike<PointerIntPair<PointerTy, IntBits, IntType>> {
static const bool value = true;
};
// Provide specialization of DenseMapInfo for PointerIntPair.
template <typename PointerTy, unsigned IntBits, typename IntType>
struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>> {
typedef PointerIntPair<PointerTy, IntBits, IntType> Ty;
static Ty getEmptyKey() {
uintptr_t Val = static_cast<uintptr_t>(-1);
Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
}
static Ty getTombstoneKey() {
uintptr_t Val = static_cast<uintptr_t>(-2);
Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
}
static unsigned getHashValue(Ty V) {
uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
return unsigned(IV) ^ unsigned(IV >> 9);
}
static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
};
// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
template <typename PointerTy, unsigned IntBits, typename IntType,
typename PtrTraits>
class PointerLikeTypeTraits<
PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
public:
static inline void *
getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
return P.getOpaqueValue();
}
static inline PointerIntPair<PointerTy, IntBits, IntType>
getFromVoidPointer(void *P) {
return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
}
static inline PointerIntPair<PointerTy, IntBits, IntType>
getFromVoidPointer(const void *P) {
return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
}
enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits };
};
} // end namespace llvm
#endif
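// Illustrative usage sketch (added for exposition; not part of the original
// header). The function name is hypothetical. A pointer and a 1-bit flag
// share a single pointer-sized word; the flag lives in the pointer's spare
// low bits.
void examplePointerIntPair() {
  static int X = 0;
  llvm::PointerIntPair<int *, 1, bool> P(&X, true);
  int *Ptr = P.getPointer(); // tag bits are masked off before the cast back
  bool Flag = P.getInt();    // true
  P.setInt(false);           // leaves the pointer part untouched
  (void)Ptr;
  (void)Flag;
}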

View File

@ -1,205 +0,0 @@
//===- llvm/ADT/PointerSumType.h --------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_POINTERSUMTYPE_H
#define LLVM_ADT_POINTERSUMTYPE_H
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
namespace llvm {
/// A compile time pair of an integer tag and the pointer-like type which it
/// indexes within a sum type. Also allows the user to specify a particular
/// traits class for pointer types with custom behavior such as over-aligned
/// allocation.
template <uintptr_t N, typename PointerArgT,
typename TraitsArgT = PointerLikeTypeTraits<PointerArgT>>
struct PointerSumTypeMember {
enum { Tag = N };
typedef PointerArgT PointerT;
typedef TraitsArgT TraitsT;
};
namespace detail {
template <typename TagT, typename... MemberTs>
struct PointerSumTypeHelper;
}
/// A sum type over pointer-like types.
///
/// This is a normal tagged union across pointer-like types that uses the low
/// bits of the pointers to store the tag.
///
/// Each member of the sum type is specified by passing a \c
/// PointerSumTypeMember specialization in the variadic member argument list.
/// This allows the user to control the particular tag value associated with
/// a particular type, use the same type for multiple different tags, and
/// customize the pointer-like traits used for a particular member. Note that
/// these *must* be specializations of \c PointerSumTypeMember; no other type
/// will suffice, even if it provides a compatible interface.
///
/// This type implements all of the comparison operators and even hash table
/// support by comparing the underlying storage of the pointer values. It
/// doesn't support delegating to particular members for comparisons.
///
/// It also default constructs to a zero tag with a null pointer, whatever that
/// would be. This means that the zero value for the tag type is significant,
/// and it may be desirable to reserve it for the state that the sum type
/// should represent when default constructed.
///
/// There is no support for constructing or accessing with a dynamic tag as
/// that would fundamentally violate the type safety provided by the sum type.
template <typename TagT, typename... MemberTs> class PointerSumType {
uintptr_t Value;
typedef detail::PointerSumTypeHelper<TagT, MemberTs...> HelperT;
public:
PointerSumType() : Value(0) {}
/// A typed constructor for a specific tagged member of the sum type.
template <TagT N>
static PointerSumType
create(typename HelperT::template Lookup<N>::PointerT Pointer) {
PointerSumType Result;
void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
"Pointer is insufficiently aligned to store the discriminant!");
Result.Value = reinterpret_cast<uintptr_t>(V) | N;
return Result;
}
TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
template <TagT N> bool is() const { return N == getTag(); }
template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
void *P = is<N>() ? getImpl() : nullptr;
return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
}
template <TagT N>
typename HelperT::template Lookup<N>::PointerT cast() const {
assert(is<N>() && "This instance has a different active member.");
return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
}
operator bool() const { return Value & HelperT::PointerMask; }
bool operator==(const PointerSumType &R) const { return Value == R.Value; }
bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
bool operator<(const PointerSumType &R) const { return Value < R.Value; }
bool operator>(const PointerSumType &R) const { return Value > R.Value; }
bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
uintptr_t getOpaqueValue() const { return Value; }
protected:
void *getImpl() const {
return reinterpret_cast<void *>(Value & HelperT::PointerMask);
}
};
namespace detail {
/// A helper template for implementing \c PointerSumType. It provides fast
/// compile-time lookup of the member from a particular tag value, along with
/// useful constants and compile-time checking infrastructure.
template <typename TagT, typename... MemberTs>
struct PointerSumTypeHelper : MemberTs... {
// First we use a trick to allow quickly looking up information about
// a particular member of the sum type. This works because we arranged to
// have this type derive from all of the member type templates. We can select
// the matching member for a tag using type deduction during overload
// resolution.
template <TagT N, typename PointerT, typename TraitsT>
static PointerSumTypeMember<N, PointerT, TraitsT>
LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *);
template <TagT N> static void LookupOverload(...);
template <TagT N> struct Lookup {
// Compute a particular member type by resolving the lookup helper overload.
typedef decltype(LookupOverload<N>(
static_cast<PointerSumTypeHelper *>(nullptr))) MemberT;
/// The Nth member's pointer type.
typedef typename MemberT::PointerT PointerT;
/// The Nth member's traits type.
typedef typename MemberT::TraitsT TraitsT;
};
// Next we need to compute the number of bits available for the discriminant
// by taking the min of the bits available for each member. Much of this
// would be dramatically easier with good constexpr support.
template <uintptr_t V, uintptr_t... Vs>
struct Min : std::integral_constant<
uintptr_t, (V < Min<Vs...>::value ? V : Min<Vs...>::value)> {
};
template <uintptr_t V>
struct Min<V> : std::integral_constant<uintptr_t, V> {};
enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
// Also compute the smallest discriminant and various masks for convenience.
enum : uint64_t {
MinTag = Min<MemberTs::Tag...>::value,
PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
TagMask = ~PointerMask
};
// Finally we need a recursive template to do static checks of each
// member.
template <typename MemberT, typename... InnerMemberTs>
struct Checker : Checker<InnerMemberTs...> {
static_assert(MemberT::Tag < (1 << NumTagBits),
"This discriminant value requires too many bits!");
};
template <typename MemberT> struct Checker<MemberT> : std::true_type {
static_assert(MemberT::Tag < (1 << NumTagBits),
"This discriminant value requires too many bits!");
};
static_assert(Checker<MemberTs...>::value,
"Each member must pass the checker.");
};
}
// Teach DenseMap how to use PointerSumTypes as keys.
template <typename TagT, typename... MemberTs>
struct DenseMapInfo<PointerSumType<TagT, MemberTs...>> {
typedef PointerSumType<TagT, MemberTs...> SumType;
typedef detail::PointerSumTypeHelper<TagT, MemberTs...> HelperT;
enum { SomeTag = HelperT::MinTag };
typedef typename HelperT::template Lookup<HelperT::MinTag>::PointerT
SomePointerT;
typedef DenseMapInfo<SomePointerT> SomePointerInfo;
static inline SumType getEmptyKey() {
return SumType::create<SomeTag>(SomePointerInfo::getEmptyKey());
}
static inline SumType getTombstoneKey() {
return SumType::create<SomeTag>(
SomePointerInfo::getTombstoneKey());
}
static unsigned getHashValue(const SumType &Arg) {
uintptr_t OpaqueValue = Arg.getOpaqueValue();
return DenseMapInfo<uintptr_t>::getHashValue(OpaqueValue);
}
static bool isEqual(const SumType &LHS, const SumType &RHS) {
return LHS == RHS;
}
};
}
#endif
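// Illustrative usage sketch (added for exposition; not part of the original
// header). `ExampleTags`, `ExampleSum`, and the function name are
// hypothetical. With int*/float* members providing two spare low bits each,
// tag values must be < 4.
enum ExampleTags { IntTag, FloatTag };
typedef llvm::PointerSumType<
    ExampleTags, llvm::PointerSumTypeMember<IntTag, int *>,
    llvm::PointerSumTypeMember<FloatTag, float *>>
    ExampleSum;

void examplePointerSumType() {
  static int X = 0;
  ExampleSum S = ExampleSum::create<IntTag>(&X);
  if (S.is<IntTag>()) {
    int *P = S.get<IntTag>(); // typed access, checked against the tag
    (void)P;
  }
}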

View File

@ -1,474 +0,0 @@
//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PointerUnion class, which is a discriminated union of
// pointer types.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_POINTERUNION_H
#define LLVM_ADT_POINTERUNION_H
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
template <typename T> struct PointerUnionTypeSelectorReturn {
typedef T Return;
};
/// Get a type based on whether two types are the same or not.
///
/// For:
///
/// \code
/// typedef typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return Ret;
/// \endcode
///
/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelector {
typedef typename PointerUnionTypeSelectorReturn<RET_NE>::Return Return;
};
template <typename T, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
typedef typename PointerUnionTypeSelectorReturn<RET_EQ>::Return Return;
};
template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelectorReturn<
PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>> {
typedef
typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return Return;
};
/// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
/// for the two template arguments.
template <typename PT1, typename PT2> class PointerUnionUIntTraits {
public:
static inline void *getAsVoidPointer(void *P) { return P; }
static inline void *getFromVoidPointer(void *P) { return P; }
enum {
PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
};
};
/// A discriminated union of two pointer types, with the discriminator in the
/// low bit of the pointer.
///
/// This implementation is extremely efficient in space due to leveraging the
/// low bits of the pointer, while exposing a natural and type-safe API.
///
/// Common use patterns would be something like this:
/// PointerUnion<int*, float*> P;
/// P = (int*)0;
/// printf("%d %d", P.is<int*>(), P.is<float*>()); // prints "1 0"
/// X = P.get<int*>(); // ok.
/// Y = P.get<float*>(); // runtime assertion failure.
/// Z = P.get<double*>(); // compile time failure.
/// P = (float*)0;
/// Y = P.get<float*>(); // ok.
/// X = P.get<int*>(); // runtime assertion failure.
template <typename PT1, typename PT2> class PointerUnion {
public:
typedef PointerIntPair<void *, 1, bool, PointerUnionUIntTraits<PT1, PT2>>
ValTy;
private:
ValTy Val;
struct IsPT1 {
static const int Num = 0;
};
struct IsPT2 {
static const int Num = 1;
};
template <typename T> struct UNION_DOESNT_CONTAIN_TYPE {};
public:
PointerUnion() {}
PointerUnion(PT1 V)
: Val(const_cast<void *>(
PointerLikeTypeTraits<PT1>::getAsVoidPointer(V))) {}
PointerUnion(PT2 V)
: Val(const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(V)),
1) {}
/// Test if the pointer held in the union is null, regardless of
/// which type it is.
bool isNull() const {
// Convert from the void* to one of the pointer types, to make sure that
// we recursively strip off low bits if we have a nested PointerUnion.
return !PointerLikeTypeTraits<PT1>::getFromVoidPointer(Val.getPointer());
}
explicit operator bool() const { return !isNull(); }
/// Test if the Union currently holds the type matching T.
template <typename T> int is() const {
typedef typename ::llvm::PointerUnionTypeSelector<
PT1, T, IsPT1, ::llvm::PointerUnionTypeSelector<
PT2, T, IsPT2, UNION_DOESNT_CONTAIN_TYPE<T>>>::Return
Ty;
int TyNo = Ty::Num;
return static_cast<int>(Val.getInt()) == TyNo;
}
/// Returns the value of the specified pointer type.
///
/// If the specified pointer type is incorrect, assert.
template <typename T> T get() const {
assert(is<T>() && "Invalid accessor called");
return PointerLikeTypeTraits<T>::getFromVoidPointer(Val.getPointer());
}
/// Returns the current pointer if it is of the specified pointer type,
/// otherwise returns null.
template <typename T> T dyn_cast() const {
if (is<T>())
return get<T>();
return T();
}
/// If the union is set to the first pointer type get an address pointing to
/// it.
PT1 const *getAddrOfPtr1() const {
return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
}
/// If the union is set to the first pointer type get an address pointing to
/// it.
PT1 *getAddrOfPtr1() {
assert(is<PT1>() && "Val is not the first pointer");
assert(
get<PT1>() == Val.getPointer() &&
"Can't get the address because PointerLikeTypeTraits changes the ptr");
return (PT1 *)Val.getAddrOfPointer();
}
/// Assignment from nullptr which just clears the union.
const PointerUnion &operator=(std::nullptr_t) {
Val.initWithPointer(nullptr);
return *this;
}
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
const PointerUnion &operator=(const PT1 &RHS) {
Val.initWithPointer(
const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(RHS)));
return *this;
}
const PointerUnion &operator=(const PT2 &RHS) {
Val.setPointerAndInt(
const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(RHS)),
1);
return *this;
}
void *getOpaqueValue() const { return Val.getOpaqueValue(); }
static inline PointerUnion getFromOpaqueValue(void *VP) {
PointerUnion V;
V.Val = ValTy::getFromOpaqueValue(VP);
return V;
}
};
template <typename PT1, typename PT2>
static bool operator==(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
return lhs.getOpaqueValue() == rhs.getOpaqueValue();
}
template <typename PT1, typename PT2>
static bool operator!=(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
return lhs.getOpaqueValue() != rhs.getOpaqueValue();
}
template <typename PT1, typename PT2>
static bool operator<(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
return lhs.getOpaqueValue() < rhs.getOpaqueValue();
}
// Teach SmallPtrSet that PointerUnion is "basically a pointer", that has
// # low bits available = min(PT1bits,PT2bits)-1.
template <typename PT1, typename PT2>
class PointerLikeTypeTraits<PointerUnion<PT1, PT2>> {
public:
static inline void *getAsVoidPointer(const PointerUnion<PT1, PT2> &P) {
return P.getOpaqueValue();
}
static inline PointerUnion<PT1, PT2> getFromVoidPointer(void *P) {
return PointerUnion<PT1, PT2>::getFromOpaqueValue(P);
}
// The number of bits available is the min of the two pointer types.
enum {
NumLowBitsAvailable = PointerLikeTypeTraits<
typename PointerUnion<PT1, PT2>::ValTy>::NumLowBitsAvailable
};
};
/// A pointer union of three pointer types. See documentation for PointerUnion
/// for usage.
template <typename PT1, typename PT2, typename PT3> class PointerUnion3 {
public:
typedef PointerUnion<PT1, PT2> InnerUnion;
typedef PointerUnion<InnerUnion, PT3> ValTy;
private:
ValTy Val;
struct IsInnerUnion {
ValTy Val;
IsInnerUnion(ValTy val) : Val(val) {}
template <typename T> int is() const {
return Val.template is<InnerUnion>() &&
Val.template get<InnerUnion>().template is<T>();
}
template <typename T> T get() const {
return Val.template get<InnerUnion>().template get<T>();
}
};
struct IsPT3 {
ValTy Val;
IsPT3(ValTy val) : Val(val) {}
template <typename T> int is() const { return Val.template is<T>(); }
template <typename T> T get() const { return Val.template get<T>(); }
};
public:
PointerUnion3() {}
PointerUnion3(PT1 V) { Val = InnerUnion(V); }
PointerUnion3(PT2 V) { Val = InnerUnion(V); }
PointerUnion3(PT3 V) { Val = V; }
/// Test if the pointer held in the union is null, regardless of
/// which type it is.
bool isNull() const { return Val.isNull(); }
explicit operator bool() const { return !isNull(); }
/// Test if the Union currently holds the type matching T.
template <typename T> int is() const {
// If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
typedef typename ::llvm::PointerUnionTypeSelector<
PT1, T, IsInnerUnion,
::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return
Ty;
return Ty(Val).template is<T>();
}
/// Returns the value of the specified pointer type.
///
/// If the specified pointer type is incorrect, assert.
template <typename T> T get() const {
assert(is<T>() && "Invalid accessor called");
// If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
typedef typename ::llvm::PointerUnionTypeSelector<
PT1, T, IsInnerUnion,
::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return
Ty;
return Ty(Val).template get<T>();
}
/// Returns the current pointer if it is of the specified pointer type,
/// otherwise returns null.
template <typename T> T dyn_cast() const {
if (is<T>())
return get<T>();
return T();
}
/// Assignment from nullptr which just clears the union.
const PointerUnion3 &operator=(std::nullptr_t) {
Val = nullptr;
return *this;
}
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
const PointerUnion3 &operator=(const PT1 &RHS) {
Val = InnerUnion(RHS);
return *this;
}
const PointerUnion3 &operator=(const PT2 &RHS) {
Val = InnerUnion(RHS);
return *this;
}
const PointerUnion3 &operator=(const PT3 &RHS) {
Val = RHS;
return *this;
}
void *getOpaqueValue() const { return Val.getOpaqueValue(); }
static inline PointerUnion3 getFromOpaqueValue(void *VP) {
PointerUnion3 V;
V.Val = ValTy::getFromOpaqueValue(VP);
return V;
}
};
// Teach SmallPtrSet that PointerUnion3 is "basically a pointer", that has
// # low bits available = min(PT1bits,PT2bits,PT3bits)-2.
template <typename PT1, typename PT2, typename PT3>
class PointerLikeTypeTraits<PointerUnion3<PT1, PT2, PT3>> {
public:
static inline void *getAsVoidPointer(const PointerUnion3<PT1, PT2, PT3> &P) {
return P.getOpaqueValue();
}
static inline PointerUnion3<PT1, PT2, PT3> getFromVoidPointer(void *P) {
return PointerUnion3<PT1, PT2, PT3>::getFromOpaqueValue(P);
}
// The number of bits available is the min of the nested pointer types.
enum {
NumLowBitsAvailable = PointerLikeTypeTraits<
typename PointerUnion3<PT1, PT2, PT3>::ValTy>::NumLowBitsAvailable
};
};
/// A pointer union of four pointer types. See documentation for PointerUnion
/// for usage.
template <typename PT1, typename PT2, typename PT3, typename PT4>
class PointerUnion4 {
public:
typedef PointerUnion<PT1, PT2> InnerUnion1;
typedef PointerUnion<PT3, PT4> InnerUnion2;
typedef PointerUnion<InnerUnion1, InnerUnion2> ValTy;
private:
ValTy Val;
public:
PointerUnion4() {}
PointerUnion4(PT1 V) { Val = InnerUnion1(V); }
PointerUnion4(PT2 V) { Val = InnerUnion1(V); }
PointerUnion4(PT3 V) { Val = InnerUnion2(V); }
PointerUnion4(PT4 V) { Val = InnerUnion2(V); }
/// Test if the pointer held in the union is null, regardless of
/// which type it is.
bool isNull() const { return Val.isNull(); }
explicit operator bool() const { return !isNull(); }
/// Test if the Union currently holds the type matching T.
template <typename T> int is() const {
// If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
typedef typename ::llvm::PointerUnionTypeSelector<
PT1, T, InnerUnion1, ::llvm::PointerUnionTypeSelector<
PT2, T, InnerUnion1, InnerUnion2>>::Return Ty;
return Val.template is<Ty>() && Val.template get<Ty>().template is<T>();
}
/// Returns the value of the specified pointer type.
///
/// If the specified pointer type is incorrect, assert.
template <typename T> T get() const {
assert(is<T>() && "Invalid accessor called");
// If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
typedef typename ::llvm::PointerUnionTypeSelector<
PT1, T, InnerUnion1, ::llvm::PointerUnionTypeSelector<
PT2, T, InnerUnion1, InnerUnion2>>::Return Ty;
return Val.template get<Ty>().template get<T>();
}
/// Returns the current pointer if it is of the specified pointer type,
/// otherwise returns null.
template <typename T> T dyn_cast() const {
if (is<T>())
return get<T>();
return T();
}
/// Assignment from nullptr which just clears the union.
const PointerUnion4 &operator=(std::nullptr_t) {
Val = nullptr;
return *this;
}
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
const PointerUnion4 &operator=(const PT1 &RHS) {
Val = InnerUnion1(RHS);
return *this;
}
const PointerUnion4 &operator=(const PT2 &RHS) {
Val = InnerUnion1(RHS);
return *this;
}
const PointerUnion4 &operator=(const PT3 &RHS) {
Val = InnerUnion2(RHS);
return *this;
}
const PointerUnion4 &operator=(const PT4 &RHS) {
Val = InnerUnion2(RHS);
return *this;
}
void *getOpaqueValue() const { return Val.getOpaqueValue(); }
static inline PointerUnion4 getFromOpaqueValue(void *VP) {
PointerUnion4 V;
V.Val = ValTy::getFromOpaqueValue(VP);
return V;
}
};
// Teach SmallPtrSet that PointerUnion4 is "basically a pointer", that has
// # low bits available = min(PT1bits,PT2bits,PT3bits,PT4bits)-2.
template <typename PT1, typename PT2, typename PT3, typename PT4>
class PointerLikeTypeTraits<PointerUnion4<PT1, PT2, PT3, PT4>> {
public:
static inline void *
getAsVoidPointer(const PointerUnion4<PT1, PT2, PT3, PT4> &P) {
return P.getOpaqueValue();
}
static inline PointerUnion4<PT1, PT2, PT3, PT4> getFromVoidPointer(void *P) {
return PointerUnion4<PT1, PT2, PT3, PT4>::getFromOpaqueValue(P);
}
// The number of bits available is the min of the nested pointer types.
enum {
NumLowBitsAvailable = PointerLikeTypeTraits<
typename PointerUnion4<PT1, PT2, PT3, PT4>::ValTy>::NumLowBitsAvailable
};
};
// Teach DenseMap how to use PointerUnions as keys.
template <typename T, typename U> struct DenseMapInfo<PointerUnion<T, U>> {
typedef PointerUnion<T, U> Pair;
typedef DenseMapInfo<T> FirstInfo;
typedef DenseMapInfo<U> SecondInfo;
static inline Pair getEmptyKey() { return Pair(FirstInfo::getEmptyKey()); }
static inline Pair getTombstoneKey() {
return Pair(FirstInfo::getTombstoneKey());
}
static unsigned getHashValue(const Pair &PairVal) {
intptr_t key = (intptr_t)PairVal.getOpaqueValue();
return DenseMapInfo<intptr_t>::getHashValue(key);
}
static bool isEqual(const Pair &LHS, const Pair &RHS) {
return LHS.template is<T>() == RHS.template is<T>() &&
(LHS.template is<T>() ? FirstInfo::isEqual(LHS.template get<T>(),
RHS.template get<T>())
: SecondInfo::isEqual(LHS.template get<U>(),
RHS.template get<U>()));
}
};
}
#endif
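// Illustrative sketch complementing the usage comment in the class
// documentation above (added for exposition; not part of the original
// header). The function name is hypothetical.
void examplePointerUnion(llvm::PointerUnion<int *, float *> P) {
  if (int *IP = P.dyn_cast<int *>()) { // null when a float* is active
    (void)IP;
  }
  P = nullptr;             // operator=(std::nullptr_t) clears the union
  bool Empty = P.isNull(); // true
  (void)Empty;
}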

View File

@ -1,300 +0,0 @@
//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file builds on the ADT/GraphTraits.h file to build a generic graph
// post order iterator. This should work over any graph type that has a
// GraphTraits specialization.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_POSTORDERITERATOR_H
#define LLVM_ADT_POSTORDERITERATOR_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
#include <set>
#include <vector>
namespace llvm {
// The po_iterator_storage template provides access to the set of already
// visited nodes during the po_iterator's depth-first traversal.
//
// The default implementation simply contains a set of visited nodes, while
// the External=true version uses a reference to an external set.
//
// It is possible to prune the depth-first traversal in several ways:
//
// - When providing an external set that already contains some graph nodes,
// those nodes won't be visited again. This is useful for restarting a
// post-order traversal on a graph with nodes that aren't dominated by a
// single node.
//
// - By providing a custom SetType class, unwanted graph nodes can be excluded
// by having the insert() function return false. This could for example
// confine a CFG traversal to blocks in a specific loop.
//
// - Finally, by specializing the po_iterator_storage template itself, graph
// edges can be pruned by returning false in the insertEdge() function. This
// could be used to remove loop back-edges from the CFG seen by po_iterator.
//
// A specialized po_iterator_storage class can observe both the pre-order and
// the post-order. The insertEdge() function is called in a pre-order, while
// the finishPostorder() function is called just before the po_iterator moves
// on to the next node.
/// Default po_iterator_storage implementation with an internal set object.
template<class SetType, bool External>
class po_iterator_storage {
SetType Visited;
public:
// Return true if edge destination should be visited.
template<typename NodeType>
bool insertEdge(NodeType *From, NodeType *To) {
return Visited.insert(To).second;
}
// Called after all children of BB have been visited.
template<typename NodeType>
void finishPostorder(NodeType *BB) {}
};
/// Specialization of po_iterator_storage that references an external set.
template<class SetType>
class po_iterator_storage<SetType, true> {
SetType &Visited;
public:
po_iterator_storage(SetType &VSet) : Visited(VSet) {}
po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
// Return true if the edge destination should be visited; called with
// From = nullptr for the root node.
// Graph edges can be pruned by specializing this function.
template <class NodeType> bool insertEdge(NodeType *From, NodeType *To) {
return Visited.insert(To).second;
}
// Called after all children of BB have been visited.
template<class NodeType>
void finishPostorder(NodeType *BB) {}
};
template<class GraphT,
class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
bool ExtStorage = false,
class GT = GraphTraits<GraphT> >
class po_iterator : public std::iterator<std::forward_iterator_tag,
typename GT::NodeType, ptrdiff_t>,
public po_iterator_storage<SetType, ExtStorage> {
typedef std::iterator<std::forward_iterator_tag,
typename GT::NodeType, ptrdiff_t> super;
typedef typename GT::NodeType NodeType;
typedef typename GT::ChildIteratorType ChildItTy;
// VisitStack - Used to maintain the ordering. Top = current block.
// First element is the basic block pointer, second is the 'next child' to visit.
std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
NodeType *BB = *VisitStack.back().second++;
if (this->insertEdge(VisitStack.back().first, BB)) {
// If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
po_iterator(NodeType *BB) {
this->insertEdge((NodeType*)nullptr, BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
po_iterator() {} // End is when stack is empty.
po_iterator(NodeType *BB, SetType &S)
: po_iterator_storage<SetType, ExtStorage>(S) {
if (this->insertEdge((NodeType*)nullptr, BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
}
po_iterator(SetType &S)
: po_iterator_storage<SetType, ExtStorage>(S) {
} // End is when stack is empty.
public:
typedef typename super::pointer pointer;
// Provide static "constructors"...
static po_iterator begin(GraphT G) {
return po_iterator(GT::getEntryNode(G));
}
static po_iterator end(GraphT G) { return po_iterator(); }
static po_iterator begin(GraphT G, SetType &S) {
return po_iterator(GT::getEntryNode(G), S);
}
static po_iterator end(GraphT G, SetType &S) { return po_iterator(S); }
bool operator==(const po_iterator &x) const {
return VisitStack == x.VisitStack;
}
bool operator!=(const po_iterator &x) const { return !(*this == x); }
pointer operator*() const { return VisitStack.back().first; }
// This is a nonstandard operator-> that dereferences the pointer an extra
// time so that you can actually call methods ON the BasicBlock, because
// the contained type is a pointer. This allows, e.g., BBIt->getTerminator().
//
NodeType *operator->() const { return **this; }
po_iterator &operator++() { // Preincrement
this->finishPostorder(VisitStack.back().first);
VisitStack.pop_back();
if (!VisitStack.empty())
traverseChild();
return *this;
}
po_iterator operator++(int) { // Postincrement
po_iterator tmp = *this;
++*this;
return tmp;
}
};
// Provide global constructors that automatically figure out correct types...
//
template <class T>
po_iterator<T> po_begin(const T &G) { return po_iterator<T>::begin(G); }
template <class T>
po_iterator<T> po_end (const T &G) { return po_iterator<T>::end(G); }
template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
return make_range(po_begin(G), po_end(G));
}
// Provide global definitions of external postorder iterators...
template<class T, class SetType=std::set<typename GraphTraits<T>::NodeType*> >
struct po_ext_iterator : public po_iterator<T, SetType, true> {
po_ext_iterator(const po_iterator<T, SetType, true> &V) :
po_iterator<T, SetType, true>(V) {}
};
template<class T, class SetType>
po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) {
return po_ext_iterator<T, SetType>::begin(G, S);
}
template<class T, class SetType>
po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) {
return po_ext_iterator<T, SetType>::end(G, S);
}
template <class T, class SetType>
iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &S) {
return make_range(po_ext_begin(G, S), po_ext_end(G, S));
}
// Provide global definitions of inverse post order iterators...
template <class T,
class SetType = std::set<typename GraphTraits<T>::NodeType*>,
bool External = false>
struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External > {
ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
po_iterator<Inverse<T>, SetType, External> (V) {}
};
template <class T>
ipo_iterator<T> ipo_begin(const T &G) {
return ipo_iterator<T>::begin(G);
}
template <class T>
ipo_iterator<T> ipo_end(const T &G){
return ipo_iterator<T>::end(G);
}
template <class T>
iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
return make_range(ipo_begin(G), ipo_end(G));
}
// Provide global definitions of external inverse postorder iterators...
template <class T,
class SetType = std::set<typename GraphTraits<T>::NodeType*> >
struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
ipo_iterator<T, SetType, true>(V) {}
ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
ipo_iterator<T, SetType, true>(V) {}
};
template <class T, class SetType>
ipo_ext_iterator<T, SetType> ipo_ext_begin(const T &G, SetType &S) {
return ipo_ext_iterator<T, SetType>::begin(G, S);
}
template <class T, class SetType>
ipo_ext_iterator<T, SetType> ipo_ext_end(const T &G, SetType &S) {
return ipo_ext_iterator<T, SetType>::end(G, S);
}
template <class T, class SetType>
iterator_range<ipo_ext_iterator<T, SetType>>
inverse_post_order_ext(const T &G, SetType &S) {
return make_range(ipo_ext_begin(G, S), ipo_ext_end(G, S));
}
//===--------------------------------------------------------------------===//
// Reverse Post Order CFG iterator code
//===--------------------------------------------------------------------===//
//
// This is used to visit basic blocks in a method in reverse post order. This
// class is awkward to use because I don't know a good incremental algorithm to
// compute RPO from a graph. Because of this, the construction of the
// ReversePostOrderTraversal object is expensive (it must walk the entire graph
// with a postorder iterator to build the data structures). The moral of this
// story is: Don't create more ReversePostOrderTraversal classes than necessary.
//
// This class should be used like this:
// {
// ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create
// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
// ...
// }
// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
// ...
// }
// }
//
template<class GraphT, class GT = GraphTraits<GraphT> >
class ReversePostOrderTraversal {
typedef typename GT::NodeType NodeType;
std::vector<NodeType*> Blocks; // Block list in normal PO order
void Initialize(NodeType *BB) {
std::copy(po_begin(BB), po_end(BB), std::back_inserter(Blocks));
}
public:
typedef typename std::vector<NodeType*>::reverse_iterator rpo_iterator;
ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); }
// Because we want a reverse post order, use reverse iterators from the vector
rpo_iterator begin() { return Blocks.rbegin(); }
rpo_iterator end() { return Blocks.rend(); }
};
} // End llvm namespace
#endif
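
A minimal usage sketch (illustrative, not part of the header): it assumes the
GraphTraits specializations for Function CFGs provided by llvm/IR/CFG.h, and
the visitBlocks helper name is hypothetical.

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"
using namespace llvm;
static void visitBlocks(Function &F) { // hypothetical helper
  // Post order: each block is visited after its reachable successors.
  for (BasicBlock *BB : post_order(&F)) {
    (void)BB; // ... analyze BB ...
  }
  // Reverse post order: expensive to construct, so build it once and reuse.
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT) {
    (void)BB; // ... analyze BB ...
  }
}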

View File

@ -1,84 +0,0 @@
//===- llvm/ADT/PriorityQueue.h - Priority queues ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PriorityQueue class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_PRIORITYQUEUE_H
#define LLVM_ADT_PRIORITYQUEUE_H
#include <algorithm>
#include <queue>
namespace llvm {
/// PriorityQueue - This class behaves like std::priority_queue and
/// provides a few additional convenience functions.
///
template<class T,
class Sequence = std::vector<T>,
class Compare = std::less<typename Sequence::value_type> >
class PriorityQueue : public std::priority_queue<T, Sequence, Compare> {
public:
explicit PriorityQueue(const Compare &compare = Compare(),
const Sequence &sequence = Sequence())
: std::priority_queue<T, Sequence, Compare>(compare, sequence)
{}
template<class Iterator>
PriorityQueue(Iterator begin, Iterator end,
const Compare &compare = Compare(),
const Sequence &sequence = Sequence())
: std::priority_queue<T, Sequence, Compare>(begin, end, compare, sequence)
{}
/// erase_one - Erase one element from the queue, regardless of its
/// position. This operation performs a linear search to find an element
/// equal to t, but then uses only logarithmic-time operations to do
/// the actual erase.
///
void erase_one(const T &t) {
// Linear-search to find the element.
typename Sequence::size_type i =
std::find(this->c.begin(), this->c.end(), t) - this->c.begin();
// Logarithmic-time heap bubble-up.
while (i != 0) {
typename Sequence::size_type parent = (i - 1) / 2;
this->c[i] = this->c[parent];
i = parent;
}
// The element we want to remove is now at the root, so we can use
// priority_queue's plain pop to remove it.
this->pop();
}
/// reheapify - If an element in the queue has changed in a way that
/// affects its standing in the comparison function, the queue's
/// internal state becomes invalid. Calling reheapify() resets the
/// queue's state, making it valid again. This operation has time
/// complexity proportional to the number of elements in the queue,
/// so don't plan to use it a lot.
///
void reheapify() {
std::make_heap(this->c.begin(), this->c.end(), this->comp);
}
/// clear - Erase all elements from the queue.
///
void clear() {
this->c.clear();
}
};
} // End llvm namespace
#endif
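
A minimal sketch of the extra conveniences (illustrative only; the demo
function name is hypothetical):

#include "llvm/ADT/PriorityQueue.h"
static void priorityQueueDemo() {
  llvm::PriorityQueue<int> Q;
  Q.push(3);
  Q.push(1);
  Q.push(4);
  Q.erase_one(1);        // erase by value, wherever it sits in the heap
  int Largest = Q.top(); // 4; largest first, as with std::priority_queue
  (void)Largest;
  Q.clear();             // not available on std::priority_queue
}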

View File

@ -1,224 +0,0 @@
//===- PriorityWorklist.h - Worklist with insertion priority ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
///
/// This file provides a priority worklist. See the class comments for details.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_PRIORITYWORKLIST_H
#define LLVM_ADT_PRIORITYWORKLIST_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>
namespace llvm {
/// A FILO worklist that prioritizes on re-insertion without duplication.
///
/// This is very similar to a \c SetVector with the primary difference that
/// while re-insertion does not create a duplicate, it does adjust the
/// visitation order to respect the last insertion point. This can be useful
/// when the visit order needs to be prioritized based on insertion point
/// without actually having duplicate visits.
///
/// Note that this doesn't prevent re-insertion of elements which have been
/// visited -- if you need to break cycles, a set will still be necessary.
///
/// The type \c T must be default constructible to a null value that will be
/// ignored. It is an error to insert such a value, and popping elements will
/// never produce such a value. It is expected to be used with common nullable
/// types like pointers or optionals.
///
/// Internally this uses a vector to store the worklist and a map to identify
/// existing elements in the worklist. Both of these may be customized, but the
/// map must support the basic DenseMap API for mapping from a T to an integer
/// index into the vector.
///
/// A partial specialization is provided to automatically select a SmallVector
/// and a SmallDenseMap if custom data structures are not provided.
template <typename T, typename VectorT = std::vector<T>,
typename MapT = DenseMap<T, ptrdiff_t>>
class PriorityWorklist {
public:
typedef T value_type;
typedef T key_type;
typedef T& reference;
typedef const T& const_reference;
typedef typename MapT::size_type size_type;
/// Construct an empty PriorityWorklist
PriorityWorklist() {}
/// Determine if the PriorityWorklist is empty or not.
bool empty() const {
return V.empty();
}
/// Returns the number of elements in the worklist.
size_type size() const {
return M.size();
}
/// Count the number of elements of a given key in the PriorityWorklist.
/// \returns 0 if the element is not in the PriorityWorklist, 1 if it is.
size_type count(const key_type &key) const {
return M.count(key);
}
/// Return the last element of the PriorityWorklist.
const T &back() const {
assert(!empty() && "Cannot call back() on empty PriorityWorklist!");
return V.back();
}
/// Insert a new element into the PriorityWorklist.
/// \returns true if the element was inserted into the PriorityWorklist.
bool insert(const T &X) {
assert(X != T() && "Cannot insert a null (default constructed) value!");
auto InsertResult = M.insert({X, V.size()});
if (InsertResult.second) {
// Fresh value, just append it to the vector.
V.push_back(X);
return true;
}
auto &Index = InsertResult.first->second;
assert(V[Index] == X && "Value not actually at index in map!");
if (Index != (ptrdiff_t)(V.size() - 1)) {
// If the element isn't at the back, null it out and append a fresh one.
V[Index] = T();
Index = (ptrdiff_t)V.size();
V.push_back(X);
}
return false;
}
/// Remove the last element of the PriorityWorklist.
void pop_back() {
assert(!empty() && "Cannot remove an element when empty!");
assert(back() != T() && "Cannot have a null element at the back!");
M.erase(back());
do {
V.pop_back();
} while (!V.empty() && V.back() == T());
}
T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
T Ret = back();
pop_back();
return Ret;
}
/// Erase an item from the worklist.
///
/// Note that this is constant time due to the nature of the worklist implementation.
bool erase(const T& X) {
auto I = M.find(X);
if (I == M.end())
return false;
assert(V[I->second] == X && "Value not actually at index in map!");
if (I->second == (ptrdiff_t)(V.size() - 1)) {
do {
V.pop_back();
} while (!V.empty() && V.back() == T());
} else {
V[I->second] = T();
}
M.erase(I);
return true;
}
/// Erase items from the set vector based on a predicate function.
///
/// This is intended to be equivalent to the following code, if we could
/// write it:
///
/// \code
/// V.erase(std::remove_if(V.begin(), V.end(), P), V.end());
/// \endcode
///
/// However, PriorityWorklist doesn't expose non-const iterators, making any
/// algorithm like remove_if impossible to use.
///
/// \returns true if any element is removed.
template <typename UnaryPredicate>
bool erase_if(UnaryPredicate P) {
typename VectorT::iterator E = std::remove_if(
V.begin(), V.end(), TestAndEraseFromMap<UnaryPredicate>(P, M));
if (E == V.end())
return false;
for (auto I = V.begin(); I != E; ++I)
if (*I != T())
M[*I] = I - V.begin();
V.erase(E, V.end());
return true;
}
/// Completely clear the PriorityWorklist
void clear() {
M.clear();
V.clear();
}
private:
/// A wrapper predicate designed for use with std::remove_if.
///
/// This predicate wraps a predicate suitable for use with std::remove_if to
/// call M.erase(x) on each element which is slated for removal. This just
/// allows the predicate to be move-only, which we can't do with lambdas
/// today.
template <typename UnaryPredicateT>
class TestAndEraseFromMap {
UnaryPredicateT P;
MapT &M;
public:
TestAndEraseFromMap(UnaryPredicateT P, MapT &M)
: P(std::move(P)), M(M) {}
bool operator()(const T &Arg) {
if (Arg == T())
// Skip null values in the PriorityWorklist.
return false;
if (P(Arg)) {
M.erase(Arg);
return true;
}
return false;
}
};
/// The map from value to index in the vector.
MapT M;
/// The vector of elements in insertion order.
VectorT V;
};
/// A version of \c PriorityWorklist that selects small size optimized data
/// structures for the vector and map.
template <typename T, unsigned N>
class SmallPriorityWorklist
: public PriorityWorklist<T, SmallVector<T, N>,
SmallDenseMap<T, ptrdiff_t>> {
public:
SmallPriorityWorklist() {}
};
}
#endif
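
A minimal sketch of the re-prioritizing behavior (illustrative only; the demo
function and its pointer arguments are hypothetical):

#include "llvm/ADT/PriorityWorklist.h"
static void worklistDemo(int *A, int *B) {
  llvm::SmallPriorityWorklist<int *, 4> WL;
  WL.insert(A);
  WL.insert(B);
  WL.insert(A); // no duplicate, but A's priority now reflects this insertion
  int *First = WL.pop_back_val();  // A: re-insertion moved it to the back
  int *Second = WL.pop_back_val(); // B
  (void)First;
  (void)Second;
}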

View File

@ -1,244 +0,0 @@
//===---- ADT/SCCIterator.h - Strongly Connected Comp. Iter. ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This builds on the llvm/ADT/GraphTraits.h file to find the strongly
/// connected components (SCCs) of a graph in O(N+E) time using Tarjan's DFS
/// algorithm.
///
/// The SCC iterator has the important property that if a node in SCC S1 has an
/// edge to a node in SCC S2, then it visits S1 *after* S2.
///
/// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. (NOTE:
/// This requires some simple wrappers and is not supported yet.)
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SCCITERATOR_H
#define LLVM_ADT_SCCITERATOR_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include <vector>
namespace llvm {
/// \brief Enumerate the SCCs of a directed graph in reverse topological order
/// of the SCC DAG.
///
/// This is implemented using Tarjan's DFS algorithm using an internal stack to
/// build up a vector of nodes in a particular SCC. Note that it is a forward
/// iterator and thus you cannot backtrack or re-visit nodes.
template <class GraphT, class GT = GraphTraits<GraphT>>
class scc_iterator : public iterator_facade_base<
scc_iterator<GraphT, GT>, std::forward_iterator_tag,
const std::vector<typename GT::NodeRef>, ptrdiff_t> {
typedef typename GT::NodeRef NodeRef;
typedef typename GT::ChildIteratorType ChildItTy;
typedef std::vector<NodeRef> SccTy;
typedef typename scc_iterator::reference reference;
/// Element of VisitStack during DFS.
struct StackElement {
NodeRef Node; ///< The current node pointer.
ChildItTy NextChild; ///< The next child, modified inplace during DFS.
unsigned MinVisited; ///< Minimum uplink value of all children of Node.
StackElement(NodeRef Node, const ChildItTy &Child, unsigned Min)
: Node(Node), NextChild(Child), MinVisited(Min) {}
bool operator==(const StackElement &Other) const {
return Node == Other.Node &&
NextChild == Other.NextChild &&
MinVisited == Other.MinVisited;
}
};
/// The visit counters used to detect when a complete SCC is on the stack.
/// visitNum is the global counter.
///
/// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
unsigned visitNum;
DenseMap<NodeRef, unsigned> nodeVisitNumbers;
/// Stack holding nodes of the SCC.
std::vector<NodeRef> SCCNodeStack;
/// The current SCC, retrieved using operator*().
SccTy CurrentSCC;
/// DFS stack, used to maintain the ordering. The top contains the current
/// node, the next child to visit, and the minimum uplink value of all its
/// children.
std::vector<StackElement> VisitStack;
/// A single "visit" within the non-recursive DFS traversal.
void DFSVisitOne(NodeRef N);
/// The stack-based DFS traversal; defined below.
void DFSVisitChildren();
/// Compute the next SCC using the DFS traversal.
void GetNextSCC();
scc_iterator(NodeRef entryN) : visitNum(0) {
DFSVisitOne(entryN);
GetNextSCC();
}
/// End is when the DFS stack is empty.
scc_iterator() {}
public:
static scc_iterator begin(const GraphT &G) {
return scc_iterator(GT::getEntryNode(G));
}
static scc_iterator end(const GraphT &) { return scc_iterator(); }
/// \brief Direct loop termination test which is more efficient than
/// comparison with \c end().
bool isAtEnd() const {
assert(!CurrentSCC.empty() || VisitStack.empty());
return CurrentSCC.empty();
}
bool operator==(const scc_iterator &x) const {
return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
}
scc_iterator &operator++() {
GetNextSCC();
return *this;
}
reference operator*() const {
assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
return CurrentSCC;
}
/// \brief Test if the current SCC has a loop.
///
/// If the SCC has more than one node, this is trivially true. If not, it may
/// still contain a loop if the node has an edge back to itself.
bool hasLoop() const;
/// This informs the \c scc_iterator that the specified \c Old node
/// has been deleted, and \c New is to be used in its place.
void ReplaceNode(NodeRef Old, NodeRef New) {
assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
nodeVisitNumbers[New] = nodeVisitNumbers[Old];
nodeVisitNumbers.erase(Old);
}
};
template <class GraphT, class GT>
void scc_iterator<GraphT, GT>::DFSVisitOne(NodeRef N) {
++visitNum;
nodeVisitNumbers[N] = visitNum;
SCCNodeStack.push_back(N);
VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
#if 0 // Enable if needed when debugging.
dbgs() << "TarjanSCC: Node " << N <<
" : visitNum = " << visitNum << "\n";
#endif
}
template <class GraphT, class GT>
void scc_iterator<GraphT, GT>::DFSVisitChildren() {
assert(!VisitStack.empty());
while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
// TOS has at least one more child so continue DFS
NodeRef childN = *VisitStack.back().NextChild++;
typename DenseMap<NodeRef, unsigned>::iterator Visited =
nodeVisitNumbers.find(childN);
if (Visited == nodeVisitNumbers.end()) {
// this node has never been seen.
DFSVisitOne(childN);
continue;
}
unsigned childNum = Visited->second;
if (VisitStack.back().MinVisited > childNum)
VisitStack.back().MinVisited = childNum;
}
}
template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
CurrentSCC.clear(); // Prepare to compute the next SCC
while (!VisitStack.empty()) {
DFSVisitChildren();
// Pop the leaf on top of the VisitStack.
NodeRef visitingN = VisitStack.back().Node;
unsigned minVisitNum = VisitStack.back().MinVisited;
assert(VisitStack.back().NextChild == GT::child_end(visitingN));
VisitStack.pop_back();
// Propagate MinVisitNum to parent so we can detect the SCC starting node.
if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
VisitStack.back().MinVisited = minVisitNum;
#if 0 // Enable if needed when debugging.
dbgs() << "TarjanSCC: Popped node " << visitingN <<
" : minVisitNum = " << minVisitNum << "; Node visit num = " <<
nodeVisitNumbers[visitingN] << "\n";
#endif
if (minVisitNum != nodeVisitNumbers[visitingN])
continue;
// A full SCC is on the SCCNodeStack! It includes all nodes below
// visitingN on the stack. Copy those nodes to CurrentSCC,
// reset their minVisit values, and return (this suspends
// the DFS traversal till the next ++).
do {
CurrentSCC.push_back(SCCNodeStack.back());
SCCNodeStack.pop_back();
nodeVisitNumbers[CurrentSCC.back()] = ~0U;
} while (CurrentSCC.back() != visitingN);
return;
}
}
template <class GraphT, class GT>
bool scc_iterator<GraphT, GT>::hasLoop() const {
assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
if (CurrentSCC.size() > 1)
return true;
NodeRef N = CurrentSCC.front();
for (ChildItTy CI = GT::child_begin(N), CE = GT::child_end(N); CI != CE;
++CI)
if (*CI == N)
return true;
return false;
}
/// \brief Construct the begin iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_begin(const T &G) {
return scc_iterator<T>::begin(G);
}
/// \brief Construct the end iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_end(const T &G) {
return scc_iterator<T>::end(G);
}
/// \brief Construct the begin iterator for a deduced graph type T's Inverse<T>.
template <class T> scc_iterator<Inverse<T> > scc_begin(const Inverse<T> &G) {
return scc_iterator<Inverse<T> >::begin(G);
}
/// \brief Construct the end iterator for a deduced graph type T's Inverse<T>.
template <class T> scc_iterator<Inverse<T> > scc_end(const Inverse<T> &G) {
return scc_iterator<Inverse<T> >::end(G);
}
} // End llvm namespace
#endif
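
A minimal sketch of iterating over the SCCs of a function's CFG (illustrative
only; it assumes the GraphTraits specializations from llvm/IR/CFG.h, and the
sccDemo name is hypothetical):

#include "llvm/ADT/SCCIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"
static void sccDemo(llvm::Function &F) {
  // SCCs are produced in reverse topological order of the SCC DAG.
  for (auto I = llvm::scc_begin(&F); !I.isAtEnd(); ++I) {
    const std::vector<llvm::BasicBlock *> &SCC = *I;
    // hasLoop() distinguishes a trivial single-block SCC from a self-loop.
    bool IsCycle = I.hasLoop();
    (void)SCC;
    (void)IsCycle;
  }
}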

View File

@ -1,613 +0,0 @@
//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some templates that are useful if you are working with the
// STL at all.
//
// No library is required when using these functions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STLEXTRAS_H
#define LLVM_ADT_STLEXTRAS_H
#include <algorithm> // for std::all_of
#include <cassert>
#include <cstddef> // for std::size_t
#include <cstdlib> // for qsort
#include <functional>
#include <iterator>
#include <memory>
#include <utility> // for std::pair
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
namespace detail {
template <typename RangeT>
using IterOfRange = decltype(std::begin(std::declval<RangeT>()));
} // End detail namespace
//===----------------------------------------------------------------------===//
// Extra additions to <functional>
//===----------------------------------------------------------------------===//
template<class Ty>
struct identity : public std::unary_function<Ty, Ty> {
Ty &operator()(Ty &self) const {
return self;
}
const Ty &operator()(const Ty &self) const {
return self;
}
};
template<class Ty>
struct less_ptr : public std::binary_function<Ty, Ty, bool> {
bool operator()(const Ty* left, const Ty* right) const {
return *left < *right;
}
};
template<class Ty>
struct greater_ptr : public std::binary_function<Ty, Ty, bool> {
bool operator()(const Ty* left, const Ty* right) const {
return *right < *left;
}
};
/// An efficient, type-erasing, non-owning reference to a callable. This is
/// intended for use as the type of a function parameter that is not used
/// after the function in question returns.
///
/// This class does not own the callable, so it is not in general safe to store
/// a function_ref.
template<typename Fn> class function_ref;
template<typename Ret, typename ...Params>
class function_ref<Ret(Params...)> {
Ret (*callback)(intptr_t callable, Params ...params);
intptr_t callable;
template<typename Callable>
static Ret callback_fn(intptr_t callable, Params ...params) {
return (*reinterpret_cast<Callable*>(callable))(
std::forward<Params>(params)...);
}
public:
template <typename Callable>
function_ref(Callable &&callable,
typename std::enable_if<
!std::is_same<typename std::remove_reference<Callable>::type,
function_ref>::value>::type * = nullptr)
: callback(callback_fn<typename std::remove_reference<Callable>::type>),
callable(reinterpret_cast<intptr_t>(&callable)) {}
Ret operator()(Params ...params) const {
return callback(callable, std::forward<Params>(params)...);
}
};
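// A minimal usage sketch (illustrative; not part of the original header).
// function_ref suits callback parameters that are only invoked before the
// callee returns, e.g.:
//
//   static void forEachDigit(function_ref<void(int)> Fn) {
//     for (int I = 0; I != 10; ++I)
//       Fn(I);
//   }
//
//   int Sum = 0;
//   forEachDigit([&Sum](int N) { Sum += N; }); // Sum == 45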
// deleter - Very simple function that is used to invoke operator
// delete on something. It is used like this:
//
// for_each(V.begin(), V.end(), deleter<Interval>);
//
template <class T>
inline void deleter(T *Ptr) {
delete Ptr;
}
//===----------------------------------------------------------------------===//
// Extra additions to <iterator>
//===----------------------------------------------------------------------===//
// mapped_iterator - This is a simple iterator adapter that causes a function to
// be dereferenced whenever operator* is invoked on the iterator.
//
template <class RootIt, class UnaryFunc>
class mapped_iterator {
RootIt current;
UnaryFunc Fn;
public:
typedef typename std::iterator_traits<RootIt>::iterator_category
iterator_category;
typedef typename std::iterator_traits<RootIt>::difference_type
difference_type;
typedef typename std::result_of<
UnaryFunc(decltype(*std::declval<RootIt>()))>
::type value_type;
typedef void pointer;
//typedef typename UnaryFunc::result_type *pointer;
typedef void reference; // Can't modify value returned by fn
typedef RootIt iterator_type;
inline const RootIt &getCurrent() const { return current; }
inline const UnaryFunc &getFunc() const { return Fn; }
inline explicit mapped_iterator(const RootIt &I, UnaryFunc F)
: current(I), Fn(F) {}
inline value_type operator*() const { // All this work to do this
return Fn(*current); // little change
}
mapped_iterator &operator++() {
++current;
return *this;
}
mapped_iterator &operator--() {
--current;
return *this;
}
mapped_iterator operator++(int) {
mapped_iterator __tmp = *this;
++current;
return __tmp;
}
mapped_iterator operator--(int) {
mapped_iterator __tmp = *this;
--current;
return __tmp;
}
mapped_iterator operator+(difference_type n) const {
return mapped_iterator(current + n, Fn);
}
mapped_iterator &operator+=(difference_type n) {
current += n;
return *this;
}
mapped_iterator operator-(difference_type n) const {
return mapped_iterator(current - n, Fn);
}
mapped_iterator &operator-=(difference_type n) {
current -= n;
return *this;
}
reference operator[](difference_type n) const { return *(*this + n); }
bool operator!=(const mapped_iterator &X) const { return !operator==(X); }
bool operator==(const mapped_iterator &X) const {
return current == X.current;
}
bool operator<(const mapped_iterator &X) const { return current < X.current; }
difference_type operator-(const mapped_iterator &X) const {
return current - X.current;
}
};
template <class Iterator, class Func>
inline mapped_iterator<Iterator, Func>
operator+(typename mapped_iterator<Iterator, Func>::difference_type N,
const mapped_iterator<Iterator, Func> &X) {
return mapped_iterator<Iterator, Func>(X.getCurrent() - N, X.getFunc());
}
// map_iterator - Provide a convenient way to create mapped_iterators, just like
// make_pair is useful for creating pairs...
//
template <class ItTy, class FuncTy>
inline mapped_iterator<ItTy, FuncTy> map_iterator(const ItTy &I, FuncTy F) {
return mapped_iterator<ItTy, FuncTy>(I, F);
}
/// \brief Metafunction to determine if type T has a member called rbegin().
template <typename T> struct has_rbegin {
template <typename U> static char(&f(const U &, decltype(&U::rbegin)))[1];
static char(&f(...))[2];
const static bool value = sizeof(f(std::declval<T>(), nullptr)) == 1;
};
// Returns an iterator_range over the given container which iterates in reverse.
// Note that the container must have rbegin()/rend() methods for this to work.
template <typename ContainerTy>
auto reverse(ContainerTy &&C,
typename std::enable_if<has_rbegin<ContainerTy>::value>::type * =
nullptr) -> decltype(make_range(C.rbegin(), C.rend())) {
return make_range(C.rbegin(), C.rend());
}
// Returns a std::reverse_iterator wrapped around the given iterator.
template <typename IteratorTy>
std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
return std::reverse_iterator<IteratorTy>(It);
}
// Returns an iterator_range over the given container which iterates in reverse.
// Note that the container must have begin()/end() methods which return
// bidirectional iterators for this to work.
template <typename ContainerTy>
auto reverse(
ContainerTy &&C,
typename std::enable_if<!has_rbegin<ContainerTy>::value>::type * = nullptr)
-> decltype(make_range(llvm::make_reverse_iterator(std::end(C)),
llvm::make_reverse_iterator(std::begin(C)))) {
return make_range(llvm::make_reverse_iterator(std::end(C)),
llvm::make_reverse_iterator(std::begin(C)));
}
/// An iterator adaptor that filters the elements of given inner iterators.
///
/// The predicate parameter should be a callable object that accepts the wrapped
/// iterator's reference type and returns a bool. When incrementing or
/// decrementing the iterator, it will call the predicate on each element and
/// skip any where it returns false.
///
/// \code
/// int A[] = { 1, 2, 3, 4 };
/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
/// // R contains { 1, 3 }.
/// \endcode
template <typename WrappedIteratorT, typename PredicateT>
class filter_iterator
: public iterator_adaptor_base<
filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
typename std::common_type<
std::forward_iterator_tag,
typename std::iterator_traits<
WrappedIteratorT>::iterator_category>::type> {
using BaseT = iterator_adaptor_base<
filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
typename std::common_type<
std::forward_iterator_tag,
typename std::iterator_traits<WrappedIteratorT>::iterator_category>::
type>;
struct PayloadType {
WrappedIteratorT End;
PredicateT Pred;
};
Optional<PayloadType> Payload;
void findNextValid() {
assert(Payload && "Payload should be engaged when findNextValid is called");
while (this->I != Payload->End && !Payload->Pred(*this->I))
BaseT::operator++();
}
// Construct the begin iterator. The begin iterator needs to know where the
// end is, so that it can stop properly when it reaches it.
filter_iterator(WrappedIteratorT Begin, WrappedIteratorT End, PredicateT Pred)
: BaseT(std::move(Begin)),
Payload(PayloadType{std::move(End), std::move(Pred)}) {
findNextValid();
}
// Construct the end iterator. It's not incrementable, so Payload doesn't
// have to be engaged.
filter_iterator(WrappedIteratorT End) : BaseT(End) {}
public:
using BaseT::operator++;
filter_iterator &operator++() {
BaseT::operator++();
findNextValid();
return *this;
}
template <typename RT, typename PT>
friend iterator_range<filter_iterator<detail::IterOfRange<RT>, PT>>
make_filter_range(RT &&, PT);
};
/// Convenience function that takes a range of elements and a predicate,
/// and returns a new filter_iterator range.
///
/// FIXME: Currently, if RangeT && is an rvalue reference to a temporary, the
/// lifetime of that temporary is not extended by the returned range object, and
/// the temporary is destroyed at the end of the full expression that contains
/// the call to make_filter_range.
template <typename RangeT, typename PredicateT>
iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
make_filter_range(RangeT &&Range, PredicateT Pred) {
using FilterIteratorT =
filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
return make_range(FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
std::end(std::forward<RangeT>(Range)),
std::move(Pred)),
FilterIteratorT(std::end(std::forward<RangeT>(Range))));
}
//===----------------------------------------------------------------------===//
// Extra additions to <utility>
//===----------------------------------------------------------------------===//
/// \brief Function object to check whether the first component of a std::pair
/// compares less than the first component of another std::pair.
struct less_first {
template <typename T> bool operator()(const T &lhs, const T &rhs) const {
return lhs.first < rhs.first;
}
};
/// \brief Function object to check whether the second component of a std::pair
/// compares less than the second component of another std::pair.
struct less_second {
template <typename T> bool operator()(const T &lhs, const T &rhs) const {
return lhs.second < rhs.second;
}
};
// A subset of N3658. More stuff can be added as needed.
/// \brief Represents a compile-time sequence of integers.
template <class T, T... I> struct integer_sequence {
typedef T value_type;
static LLVM_CONSTEXPR size_t size() { return sizeof...(I); }
};
/// \brief Alias for the common case of a sequence of size_ts.
template <size_t... I>
struct index_sequence : integer_sequence<std::size_t, I...> {};
template <std::size_t N, std::size_t... I>
struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
template <std::size_t... I>
struct build_index_impl<0, I...> : index_sequence<I...> {};
/// \brief Creates a compile-time integer sequence for a parameter pack.
template <class... Ts>
struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
//===----------------------------------------------------------------------===//
// Extra additions for arrays
//===----------------------------------------------------------------------===//
/// Find the length of an array.
template <class T, std::size_t N>
LLVM_CONSTEXPR inline size_t array_lengthof(T (&)[N]) {
return N;
}
/// Adapt std::less<T> for array_pod_sort.
template<typename T>
inline int array_pod_sort_comparator(const void *P1, const void *P2) {
if (std::less<T>()(*reinterpret_cast<const T*>(P1),
*reinterpret_cast<const T*>(P2)))
return -1;
if (std::less<T>()(*reinterpret_cast<const T*>(P2),
*reinterpret_cast<const T*>(P1)))
return 1;
return 0;
}
/// get_array_pod_sort_comparator - This is an internal helper function used to
/// get type deduction of T right.
template<typename T>
inline int (*get_array_pod_sort_comparator(const T &))
(const void*, const void*) {
return array_pod_sort_comparator<T>;
}
/// array_pod_sort - This sorts an array with the specified start and end
/// extent. This is just like std::sort, except that it calls qsort instead of
/// using an inlined template. qsort is slightly slower than std::sort, but
/// most sorts are not performance critical in LLVM and std::sort has to be
/// template instantiated for each type, leading to significant measured code
/// bloat. This function should generally be used instead of std::sort where
/// possible.
///
/// This function assumes that you have simple POD-like types that can be
/// compared with std::less and can be moved with memcpy. If this isn't true,
/// you should use std::sort.
///
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
/// default to std::less.
template<class IteratorTy>
inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
// Don't inefficiently call qsort with one element or trigger undefined
// behavior with an empty sequence.
auto NElts = End - Start;
if (NElts <= 1) return;
qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
}
template <class IteratorTy>
inline void array_pod_sort(
IteratorTy Start, IteratorTy End,
int (*Compare)(
const typename std::iterator_traits<IteratorTy>::value_type *,
const typename std::iterator_traits<IteratorTy>::value_type *)) {
// Don't inefficiently call qsort with one element or trigger undefined
// behavior with an empty sequence.
auto NElts = End - Start;
if (NElts <= 1) return;
qsort(&*Start, NElts, sizeof(*Start),
reinterpret_cast<int (*)(const void *, const void *)>(Compare));
}
//===----------------------------------------------------------------------===//
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//
/// For a container of pointers, deletes the pointers and then clears the
/// container.
template<typename Container>
void DeleteContainerPointers(Container &C) {
for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
delete *I;
C.clear();
}
/// In a container of pairs (usually a map) whose second element is a pointer,
/// deletes the second elements and then clears the container.
template<typename Container>
void DeleteContainerSeconds(Container &C) {
for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
delete I->second;
C.clear();
}
/// Provide wrappers to std::all_of which take ranges instead of having to pass
/// begin/end explicitly.
template<typename R, class UnaryPredicate>
bool all_of(R &&Range, UnaryPredicate &&P) {
return std::all_of(Range.begin(), Range.end(),
std::forward<UnaryPredicate>(P));
}
/// Provide wrappers to std::any_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, class UnaryPredicate>
bool any_of(R &&Range, UnaryPredicate &&P) {
return std::any_of(Range.begin(), Range.end(),
std::forward<UnaryPredicate>(P));
}
/// Provide wrappers to std::none_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, class UnaryPredicate>
bool none_of(R &&Range, UnaryPredicate &&P) {
return std::none_of(Range.begin(), Range.end(),
std::forward<UnaryPredicate>(P));
}
/// Provide wrappers to std::find which take ranges instead of having to pass
/// begin/end explicitly.
template<typename R, class T>
auto find(R &&Range, const T &val) -> decltype(Range.begin()) {
return std::find(Range.begin(), Range.end(), val);
}
/// Provide wrappers to std::find_if which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, class T>
auto find_if(R &&Range, const T &Pred) -> decltype(Range.begin()) {
return std::find_if(Range.begin(), Range.end(), Pred);
}
/// Provide wrappers to std::remove_if which take ranges instead of having to
/// pass begin/end explicitly.
template<typename R, class UnaryPredicate>
auto remove_if(R &&Range, UnaryPredicate &&P) -> decltype(Range.begin()) {
return std::remove_if(Range.begin(), Range.end(), P);
}
/// Wrapper function around std::find to detect if an element exists
/// in a container.
template <typename R, typename E>
bool is_contained(R &&Range, const E &Element) {
return std::find(Range.begin(), Range.end(), Element) != Range.end();
}
/// Wrapper function around std::count_if to count the number of times an
/// element satisfying a given predicate occurs in a range.
template <typename R, typename UnaryPredicate>
auto count_if(R &&Range, UnaryPredicate &&P)
-> typename std::iterator_traits<decltype(Range.begin())>::difference_type {
return std::count_if(Range.begin(), Range.end(), P);
}
/// Wrapper function around std::transform to apply a function to a range and
/// store the result elsewhere.
template <typename R, class OutputIt, typename UnaryPredicate>
OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate &&P) {
return std::transform(Range.begin(), Range.end(), d_first,
std::forward<UnaryPredicate>(P));
}
//===----------------------------------------------------------------------===//
// Extra additions to <memory>
//===----------------------------------------------------------------------===//
// Implement make_unique according to N3656.
/// \brief Constructs a `new T()` with the given args and returns a
/// `unique_ptr<T>` which owns the object.
///
/// Example:
///
/// auto p = make_unique<int>();
/// auto p = make_unique<std::tuple<int, int>>(0, 1);
template <class T, class... Args>
typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(Args &&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/// \brief Constructs a `new T[n]` with the given args and returns a
/// `unique_ptr<T[]>` which owns the object.
///
/// \param n size of the new array.
///
/// Example:
///
/// auto p = make_unique<int[]>(2); // value-initializes the array with 0's.
template <class T>
typename std::enable_if<std::is_array<T>::value && std::extent<T>::value == 0,
std::unique_ptr<T>>::type
make_unique(size_t n) {
return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
}
/// This function isn't used and is only here to provide better compile errors.
template <class T, class... Args>
typename std::enable_if<std::extent<T>::value != 0>::type
make_unique(Args &&...) = delete;
struct FreeDeleter {
void operator()(void* v) {
::free(v);
}
};
template<typename First, typename Second>
struct pair_hash {
size_t operator()(const std::pair<First, Second> &P) const {
return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
}
};
/// A functor like C++14's std::less<void> in its absence.
struct less {
template <typename A, typename B> bool operator()(A &&a, B &&b) const {
return std::forward<A>(a) < std::forward<B>(b);
}
};
/// A functor like C++14's std::equal_to<void> in its absence.
struct equal {
template <typename A, typename B> bool operator()(A &&a, B &&b) const {
return std::forward<A>(a) == std::forward<B>(b);
}
};
/// Binary functor that adapts to any other binary functor after dereferencing
/// operands.
template <typename T> struct deref {
T func;
// Could be further improved to cope with non-derivable functors and
// non-binary functors (should be a variadic template member function
// operator()).
template <typename A, typename B>
auto operator()(A &lhs, B &rhs) const -> decltype(func(*lhs, *rhs)) {
assert(lhs);
assert(rhs);
return func(*lhs, *rhs);
}
};
} // End llvm namespace
#endif
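
A minimal sketch of the range-based helpers (illustrative only; the demo
function name is hypothetical):

#include "llvm/ADT/STLExtras.h"
#include <vector>
static void stlExtrasDemo() {
  std::vector<int> V = {1, 2, 3, 4};
  bool AllPositive = llvm::all_of(V, [](int N) { return N > 0; }); // true
  auto Three = llvm::find(V, 3);              // iterator to the 3
  for (int N : llvm::reverse(V)) { (void)N; } // 4, 3, 2, 1
  for (int N : llvm::make_filter_range(V, [](int N) { return N % 2 == 0; })) {
    (void)N;                                  // 2, 4
  }
  auto P = llvm::make_unique<std::vector<int>>(3, 7); // vector of three 7s
  (void)AllPositive;
  (void)Three;
  (void)P;
}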

View File

@ -1,256 +0,0 @@
//===- ScopedHashTable.h - A simple scoped hash table -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an efficient scoped hash table, which is useful for
// things like dominator-based optimizations. This allows clients to do things
// like this:
//
// ScopedHashTable<int, int> HT;
// {
// ScopedHashTableScope<int, int> Scope1(HT);
// HT.insert(0, 0);
// HT.insert(1, 1);
// {
// ScopedHashTableScope<int, int> Scope2(HT);
// HT.insert(0, 42);
// }
// }
//
// Looking up the value for "0" in the Scope2 block will return 42. Looking
// up the value for 0 before 42 is inserted or after Scope2 is popped will
// return 0.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
#define LLVM_ADT_SCOPEDHASHTABLE_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Allocator.h"
namespace llvm {
template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
typename AllocatorTy = MallocAllocator>
class ScopedHashTable;
template <typename K, typename V>
class ScopedHashTableVal {
ScopedHashTableVal *NextInScope;
ScopedHashTableVal *NextForKey;
K Key;
V Val;
ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
public:
const K &getKey() const { return Key; }
const V &getValue() const { return Val; }
V &getValue() { return Val; }
ScopedHashTableVal *getNextForKey() { return NextForKey; }
const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
ScopedHashTableVal *getNextInScope() { return NextInScope; }
template <typename AllocatorTy>
static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
ScopedHashTableVal *nextForKey,
const K &key, const V &val,
AllocatorTy &Allocator) {
ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
// Set up the value.
new (New) ScopedHashTableVal(key, val);
New->NextInScope = nextInScope;
New->NextForKey = nextForKey;
return New;
}
template <typename AllocatorTy> void Destroy(AllocatorTy &Allocator) {
// Free memory referenced by the item.
this->~ScopedHashTableVal();
Allocator.Deallocate(this);
}
};
template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
typename AllocatorTy = MallocAllocator>
class ScopedHashTableScope {
/// HT - The hashtable that we are active for.
ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;
/// PrevScope - This is the scope that we are shadowing in HT.
ScopedHashTableScope *PrevScope;
/// LastValInScope - This is the last value that was inserted for this scope
/// or null if none have been inserted yet.
ScopedHashTableVal<K, V> *LastValInScope;
void operator=(ScopedHashTableScope &) = delete;
ScopedHashTableScope(ScopedHashTableScope &) = delete;
public:
ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
~ScopedHashTableScope();
ScopedHashTableScope *getParentScope() { return PrevScope; }
const ScopedHashTableScope *getParentScope() const { return PrevScope; }
private:
friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
ScopedHashTableVal<K, V> *getLastValInScope() {
return LastValInScope;
}
void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
LastValInScope = Val;
}
};
template <typename K, typename V, typename KInfo = DenseMapInfo<K>>
class ScopedHashTableIterator {
ScopedHashTableVal<K, V> *Node;
public:
ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}
V &operator*() const {
assert(Node && "Dereference end()");
return Node->getValue();
}
V *operator->() const {
return &Node->getValue();
}
bool operator==(const ScopedHashTableIterator &RHS) const {
return Node == RHS.Node;
}
bool operator!=(const ScopedHashTableIterator &RHS) const {
return Node != RHS.Node;
}
inline ScopedHashTableIterator& operator++() { // Preincrement
assert(Node && "incrementing past end()");
Node = Node->getNextForKey();
return *this;
}
ScopedHashTableIterator operator++(int) { // Postincrement
ScopedHashTableIterator tmp = *this; ++*this; return tmp;
}
};
template <typename K, typename V, typename KInfo, typename AllocatorTy>
class ScopedHashTable {
public:
/// ScopeTy - This is a helpful typedef that allows clients to get easy access
/// to the name of the scope for this hash table.
typedef ScopedHashTableScope<K, V, KInfo, AllocatorTy> ScopeTy;
typedef unsigned size_type;
private:
typedef ScopedHashTableVal<K, V> ValTy;
DenseMap<K, ValTy*, KInfo> TopLevelMap;
ScopeTy *CurScope;
AllocatorTy Allocator;
ScopedHashTable(const ScopedHashTable &); // NOT YET IMPLEMENTED
void operator=(const ScopedHashTable &); // NOT YET IMPLEMENTED
friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
public:
ScopedHashTable() : CurScope(nullptr) {}
ScopedHashTable(AllocatorTy A) : CurScope(nullptr), Allocator(A) {}
~ScopedHashTable() {
assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
}
/// Access to the allocator.
AllocatorTy &getAllocator() { return Allocator; }
const AllocatorTy &getAllocator() const { return Allocator; }
/// Return 1 if the specified key is in the table, 0 otherwise.
size_type count(const K &Key) const {
return TopLevelMap.count(Key);
}
V lookup(const K &Key) {
typename DenseMap<K, ValTy*, KInfo>::iterator I = TopLevelMap.find(Key);
if (I != TopLevelMap.end())
return I->second->getValue();
return V();
}
void insert(const K &Key, const V &Val) {
insertIntoScope(CurScope, Key, Val);
}
typedef ScopedHashTableIterator<K, V, KInfo> iterator;
iterator end() { return iterator(nullptr); }
iterator begin(const K &Key) {
typename DenseMap<K, ValTy*, KInfo>::iterator I =
TopLevelMap.find(Key);
if (I == TopLevelMap.end()) return end();
return iterator(I->second);
}
ScopeTy *getCurScope() { return CurScope; }
const ScopeTy *getCurScope() const { return CurScope; }
/// insertIntoScope - This inserts the specified key/value at the specified
/// (possibly not the current) scope. While it is ok to insert into a scope
/// that isn't the current one, it isn't ok to insert *underneath* an existing
/// value of the specified key.
void insertIntoScope(ScopeTy *S, const K &Key, const V &Val) {
assert(S && "No scope active!");
ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
KeyEntry = ValTy::Create(S->getLastValInScope(), KeyEntry, Key, Val,
Allocator);
S->setLastValInScope(KeyEntry);
}
};
/// ScopedHashTableScope ctor - Install this as the current scope for the hash
/// table.
template <typename K, typename V, typename KInfo, typename Allocator>
ScopedHashTableScope<K, V, KInfo, Allocator>::
ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
PrevScope = HT.CurScope;
HT.CurScope = this;
LastValInScope = nullptr;
}
template <typename K, typename V, typename KInfo, typename Allocator>
ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
assert(HT.CurScope == this && "Scope imbalance!");
HT.CurScope = PrevScope;
// Pop and delete all values corresponding to this scope.
while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
// Pop this value out of the TopLevelMap.
if (!ThisEntry->getNextForKey()) {
assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
"Scope imbalance!");
HT.TopLevelMap.erase(ThisEntry->getKey());
} else {
ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
assert(KeyEntry == ThisEntry && "Scope imbalance!");
KeyEntry = ThisEntry->getNextForKey();
}
// Pop this value out of the scope.
LastValInScope = ThisEntry->getNextInScope();
// Delete this entry.
ThisEntry->Destroy(HT.getAllocator());
}
}
} // end namespace llvm
#endif
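
A minimal sketch mirroring the example in the header comment (illustrative
only; the demo function name is hypothetical):

#include "llvm/ADT/ScopedHashTable.h"
static void scopedHashTableDemo() {
  llvm::ScopedHashTable<int, int> HT;
  llvm::ScopedHashTable<int, int>::ScopeTy Outer(HT);
  HT.insert(0, 0);
  {
    llvm::ScopedHashTable<int, int>::ScopeTy Inner(HT);
    HT.insert(0, 42);
    int Shadowed = HT.lookup(0); // 42: Inner shadows the outer binding
    (void)Shadowed;
  } // Inner is popped here, restoring the outer binding
  int Restored = HT.lookup(0); // 0
  (void)Restored;
}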

View File

@ -1,79 +0,0 @@
//===- Sequence.h - Utility for producing sequences of values ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This routine provides some synthesis utilities to produce sequences of
/// values. The names are intentionally kept very short as they tend to occur
/// in common and widely used contexts.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SEQ_H
#define LLVM_ADT_SEQ_H
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
namespace llvm {
namespace detail {
template <typename ValueT>
class value_sequence_iterator
: public iterator_facade_base<value_sequence_iterator<ValueT>,
std::random_access_iterator_tag,
const ValueT> {
typedef typename value_sequence_iterator::iterator_facade_base BaseT;
ValueT Value;
public:
typedef typename BaseT::difference_type difference_type;
typedef typename BaseT::reference reference;
value_sequence_iterator() = default;
value_sequence_iterator(const value_sequence_iterator &) = default;
value_sequence_iterator(value_sequence_iterator &&Arg)
: Value(std::move(Arg.Value)) {}
template <typename U, typename Enabler = decltype(ValueT(std::declval<U>()))>
value_sequence_iterator(U &&Value) : Value(std::forward<U>(Value)) {}
value_sequence_iterator &operator+=(difference_type N) {
Value += N;
return *this;
}
value_sequence_iterator &operator-=(difference_type N) {
Value -= N;
return *this;
}
using BaseT::operator-;
difference_type operator-(const value_sequence_iterator &RHS) const {
return Value - RHS.Value;
}
bool operator==(const value_sequence_iterator &RHS) const {
return Value == RHS.Value;
}
bool operator<(const value_sequence_iterator &RHS) const {
return Value < RHS.Value;
}
reference operator*() const { return Value; }
};
} // End detail namespace.
template <typename ValueT>
iterator_range<detail::value_sequence_iterator<ValueT>> seq(ValueT Begin,
ValueT End) {
return make_range(detail::value_sequence_iterator<ValueT>(Begin),
detail::value_sequence_iterator<ValueT>(End));
}
}
#endif
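
A minimal sketch (illustrative only; the demo function name is hypothetical):

#include "llvm/ADT/Sequence.h"
static void seqDemo() {
  // Visits 0, 1, 2, 3, 4; the end value is exclusive.
  for (int I : llvm::seq(0, 5)) {
    (void)I;
  }
}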

View File

@ -1,71 +0,0 @@
//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines generic set operations that may be used on sets of
// different types and different element types.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SETOPERATIONS_H
#define LLVM_ADT_SETOPERATIONS_H
namespace llvm {
/// set_union(A, B) - Compute A := A u B, return whether A changed.
///
template <class S1Ty, class S2Ty>
bool set_union(S1Ty &S1, const S2Ty &S2) {
bool Changed = false;
for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
SI != SE; ++SI)
if (S1.insert(*SI).second)
Changed = true;
return Changed;
}
/// set_intersect(A, B) - Compute A := A ^ B
/// Identical to set_intersection, except that it works on set<>'s and
/// is nicer to use. Functionally, this iterates through S1, removing
/// elements that are not contained in S2.
///
template <class S1Ty, class S2Ty>
void set_intersect(S1Ty &S1, const S2Ty &S2) {
for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) {
const auto &E = *I;
++I;
if (!S2.count(E)) S1.erase(E); // Erase element if not in S2
}
}
/// set_difference(A, B) - Return A - B
///
template <class S1Ty, class S2Ty>
S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) {
S1Ty Result;
for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end();
SI != SE; ++SI)
if (!S2.count(*SI)) // if the element is not in set2
Result.insert(*SI);
return Result;
}
/// set_subtract(A, B) - Compute A := A - B
///
template <class S1Ty, class S2Ty>
void set_subtract(S1Ty &S1, const S2Ty &S2) {
for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
SI != SE; ++SI)
S1.erase(*SI);
}
} // End llvm namespace
#endif
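
A minimal sketch of the four operations (illustrative only; the demo function
name is hypothetical):

#include "llvm/ADT/SetOperations.h"
#include <set>
static void setOpsDemo() {
  std::set<int> A = {1, 2, 3};
  std::set<int> B = {3, 4};
  std::set<int> Diff = llvm::set_difference(A, B); // {1, 2}; A is untouched
  bool Changed = llvm::set_union(A, B);            // A = {1, 2, 3, 4}; true
  llvm::set_intersect(A, B);                       // A = {3, 4}
  llvm::set_subtract(A, B);                        // A = {}
  (void)Diff;
  (void)Changed;
}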

View File

@ -1,300 +0,0 @@
//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a set that has insertion order iteration
// characteristics. This is useful for keeping a set of things that need to be
// visited later but in a deterministic order (insertion order). The interface
// is purposefully minimal.
//
// This file defines SetVector and SmallSetVector; the latter performs no
// allocations while it holds fewer than a fixed number of elements.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SETVECTOR_H
#define LLVM_ADT_SETVECTOR_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>
namespace llvm {
/// \brief A vector that has set insertion semantics.
///
/// This adapter class provides a way to keep a set of things that also has the
/// property of a deterministic iteration order. The order of iteration is the
/// order of insertion.
template <typename T, typename Vector = std::vector<T>,
typename Set = DenseSet<T>>
class SetVector {
public:
typedef T value_type;
typedef T key_type;
typedef T& reference;
typedef const T& const_reference;
typedef Set set_type;
typedef Vector vector_type;
typedef typename vector_type::const_iterator iterator;
typedef typename vector_type::const_iterator const_iterator;
typedef typename vector_type::const_reverse_iterator reverse_iterator;
typedef typename vector_type::const_reverse_iterator const_reverse_iterator;
typedef typename vector_type::size_type size_type;
/// \brief Construct an empty SetVector
SetVector() {}
/// \brief Initialize a SetVector with a range of elements
template<typename It>
SetVector(It Start, It End) {
insert(Start, End);
}
ArrayRef<T> getArrayRef() const { return vector_; }
/// \brief Determine if the SetVector is empty or not.
bool empty() const {
return vector_.empty();
}
/// \brief Determine the number of elements in the SetVector.
size_type size() const {
return vector_.size();
}
/// \brief Get an iterator to the beginning of the SetVector.
iterator begin() {
return vector_.begin();
}
/// \brief Get a const_iterator to the beginning of the SetVector.
const_iterator begin() const {
return vector_.begin();
}
/// \brief Get an iterator to the end of the SetVector.
iterator end() {
return vector_.end();
}
/// \brief Get a const_iterator to the end of the SetVector.
const_iterator end() const {
return vector_.end();
}
/// \brief Get a reverse_iterator to the end of the SetVector.
reverse_iterator rbegin() {
return vector_.rbegin();
}
/// \brief Get a const_reverse_iterator to the end of the SetVector.
const_reverse_iterator rbegin() const {
return vector_.rbegin();
}
/// \brief Get a reverse_iterator to the beginning of the SetVector.
reverse_iterator rend() {
return vector_.rend();
}
/// \brief Get a const_reverse_iterator to the beginning of the SetVector.
const_reverse_iterator rend() const {
return vector_.rend();
}
/// \brief Return the last element of the SetVector.
const T &back() const {
assert(!empty() && "Cannot call back() on empty SetVector!");
return vector_.back();
}
/// \brief Index into the SetVector.
const_reference operator[](size_type n) const {
assert(n < vector_.size() && "SetVector access out of range!");
return vector_[n];
}
/// \brief Insert a new element into the SetVector.
/// \returns true if the element was inserted into the SetVector.
bool insert(const value_type &X) {
bool result = set_.insert(X).second;
if (result)
vector_.push_back(X);
return result;
}
/// \brief Insert a range of elements into the SetVector.
template<typename It>
void insert(It Start, It End) {
for (; Start != End; ++Start)
if (set_.insert(*Start).second)
vector_.push_back(*Start);
}
/// \brief Remove an item from the set vector.
bool remove(const value_type& X) {
if (set_.erase(X)) {
typename vector_type::iterator I =
std::find(vector_.begin(), vector_.end(), X);
assert(I != vector_.end() && "Corrupted SetVector instances!");
vector_.erase(I);
return true;
}
return false;
}
/// Erase a single element from the set vector.
/// \returns an iterator pointing to the next element that followed the
/// element erased. This is the end of the SetVector if the last element is
/// erased.
iterator erase(iterator I) {
const key_type &V = *I;
assert(set_.count(V) && "Corrupted SetVector instances!");
set_.erase(V);
// FIXME: No need to use the non-const iterator when built with
// std::vector::erase(const_iterator) as defined in C++11. This is for
// compatibility with non-standard libstdc++ up to 4.8 (fixed in 4.9).
auto NI = vector_.begin();
std::advance(NI, std::distance<iterator>(NI, I));
return vector_.erase(NI);
}
/// \brief Remove items from the set vector based on a predicate function.
///
/// This is intended to be equivalent to the following code, if we could
/// write it:
///
/// \code
/// V.erase(std::remove_if(V.begin(), V.end(), P), V.end());
/// \endcode
///
/// However, SetVector doesn't expose non-const iterators, making any
/// algorithm like remove_if impossible to use.
///
/// \returns true if any element is removed.
template <typename UnaryPredicate>
bool remove_if(UnaryPredicate P) {
typename vector_type::iterator I
= std::remove_if(vector_.begin(), vector_.end(),
TestAndEraseFromSet<UnaryPredicate>(P, set_));
if (I == vector_.end())
return false;
vector_.erase(I, vector_.end());
return true;
}
/// \brief Count the number of elements of a given key in the SetVector.
/// \returns 0 if the element is not in the SetVector, 1 if it is.
size_type count(const key_type &key) const {
return set_.count(key);
}
/// \brief Completely clear the SetVector
void clear() {
set_.clear();
vector_.clear();
}
/// \brief Remove the last element of the SetVector.
void pop_back() {
assert(!empty() && "Cannot remove an element from an empty SetVector!");
set_.erase(back());
vector_.pop_back();
}
T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
T Ret = back();
pop_back();
return Ret;
}
bool operator==(const SetVector &that) const {
return vector_ == that.vector_;
}
bool operator!=(const SetVector &that) const {
return vector_ != that.vector_;
}
/// \brief Compute This := This u S, return whether 'This' changed.
/// TODO: We should be able to use set_union from SetOperations.h, but
/// SetVector interface is inconsistent with DenseSet.
template <class STy>
bool set_union(const STy &S) {
bool Changed = false;
for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
++SI)
if (insert(*SI))
Changed = true;
return Changed;
}
/// \brief Compute This := This - B
/// TODO: We should be able to use set_subtract from SetOperations.h, but
/// SetVector interface is inconsistent with DenseSet.
template <class STy>
void set_subtract(const STy &S) {
for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
++SI)
remove(*SI);
}
private:
/// \brief A wrapper predicate designed for use with std::remove_if.
///
/// This predicate wraps a predicate suitable for use with std::remove_if to
/// call set_.erase(x) on each element which is slated for removal.
template <typename UnaryPredicate>
class TestAndEraseFromSet {
UnaryPredicate P;
set_type &set_;
public:
TestAndEraseFromSet(UnaryPredicate P, set_type &set_)
: P(std::move(P)), set_(set_) {}
template <typename ArgumentT>
bool operator()(const ArgumentT &Arg) {
if (P(Arg)) {
set_.erase(Arg);
return true;
}
return false;
}
};
set_type set_; ///< The set.
vector_type vector_; ///< The vector.
};
/// \brief A SetVector that performs no allocations if smaller than
/// a certain size.
template <typename T, unsigned N>
class SmallSetVector : public SetVector<T, SmallVector<T, N>, SmallSet<T, N> > {
public:
SmallSetVector() {}
/// \brief Initialize a SmallSetVector with a range of elements
template<typename It>
SmallSetVector(It Start, It End) {
this->insert(Start, End);
}
};
} // End llvm namespace
// vim: sw=2 ai
#endif
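A brief usage sketch of the container above: duplicates are rejected, and iteration follows insertion order.

#include "llvm/ADT/SetVector.h"
#include <cassert>

int main() {
  llvm::SetVector<int> SV;
  SV.insert(3);
  SV.insert(1);
  SV.insert(3);                     // duplicate; insert() returns false
  assert(SV.size() == 2);
  assert(SV[0] == 3 && SV[1] == 1); // insertion order is preserved
  SV.remove(3);
  assert(SV.size() == 1 && SV[0] == 1);
}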


@@ -1,595 +0,0 @@
//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SmallBitVector class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLBITVECTOR_H
#define LLVM_ADT_SMALLBITVECTOR_H
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <climits>
#include <limits>
namespace llvm {
/// This is a 'bitvector' (really, a variable-sized bit array), optimized for
/// the case when the array is small. It contains one pointer-sized field, which
/// is directly used as a plain collection of bits when possible, or as a
/// pointer to a larger heap-allocated array when necessary. This allows normal
/// "small" cases to be fast without losing generality for large inputs.
class SmallBitVector {
// TODO: In "large" mode, a pointer to a BitVector is used, leading to an
// unnecessary level of indirection. It would be more efficient to use a
// pointer to memory containing size, allocation size, and the array of bits.
uintptr_t X;
enum {
// The number of bits in this class.
NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
// One bit is used to discriminate between small and large mode. The
// remaining bits are used for the small-mode representation.
SmallNumRawBits = NumBaseBits - 1,
// A few more bits are used to store the size of the bit set in small mode.
// Theoretically this is a ceil-log2. These bits are encoded in the most
// significant bits of the raw bits.
SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
NumBaseBits == 64 ? 6 :
SmallNumRawBits),
// The remaining bits are used to store the actual set in small mode.
SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
};
static_assert(NumBaseBits == 64 || NumBaseBits == 32,
"Unsupported word size");
public:
typedef unsigned size_type;
// Encapsulation of a single bit.
class reference {
SmallBitVector &TheVector;
unsigned BitPos;
public:
reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}
reference(const reference&) = default;
reference& operator=(reference t) {
*this = bool(t);
return *this;
}
reference& operator=(bool t) {
if (t)
TheVector.set(BitPos);
else
TheVector.reset(BitPos);
return *this;
}
operator bool() const {
return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
}
};
private:
bool isSmall() const {
return X & uintptr_t(1);
}
BitVector *getPointer() const {
assert(!isSmall());
return reinterpret_cast<BitVector *>(X);
}
void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) {
X = 1;
setSmallSize(NewSize);
setSmallBits(NewSmallBits);
}
void switchToLarge(BitVector *BV) {
X = reinterpret_cast<uintptr_t>(BV);
assert(!isSmall() && "Tried to use an unaligned pointer");
}
// Return all the bits used for the "small" representation; this includes
// bits for the size as well as the element bits.
uintptr_t getSmallRawBits() const {
assert(isSmall());
return X >> 1;
}
void setSmallRawBits(uintptr_t NewRawBits) {
assert(isSmall());
X = (NewRawBits << 1) | uintptr_t(1);
}
// Return the size.
size_t getSmallSize() const {
return getSmallRawBits() >> SmallNumDataBits;
}
void setSmallSize(size_t Size) {
setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
}
// Return the element bits.
uintptr_t getSmallBits() const {
return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
}
void setSmallBits(uintptr_t NewBits) {
setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
(getSmallSize() << SmallNumDataBits));
}
public:
/// Creates an empty bitvector.
SmallBitVector() : X(1) {}
/// Creates a bitvector of specified number of bits. All bits are initialized
/// to the specified value.
explicit SmallBitVector(unsigned s, bool t = false) {
if (s <= SmallNumDataBits)
switchToSmall(t ? ~uintptr_t(0) : 0, s);
else
switchToLarge(new BitVector(s, t));
}
/// SmallBitVector copy ctor.
SmallBitVector(const SmallBitVector &RHS) {
if (RHS.isSmall())
X = RHS.X;
else
switchToLarge(new BitVector(*RHS.getPointer()));
}
SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
RHS.X = 1;
}
~SmallBitVector() {
if (!isSmall())
delete getPointer();
}
/// Tests whether there are no bits in this bitvector.
bool empty() const {
return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
}
/// Returns the number of bits in this bitvector.
size_t size() const {
return isSmall() ? getSmallSize() : getPointer()->size();
}
/// Returns the number of bits which are set.
size_type count() const {
if (isSmall()) {
uintptr_t Bits = getSmallBits();
return countPopulation(Bits);
}
return getPointer()->count();
}
/// Returns true if any bit is set.
bool any() const {
if (isSmall())
return getSmallBits() != 0;
return getPointer()->any();
}
/// Returns true if all bits are set.
bool all() const {
if (isSmall())
return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
return getPointer()->all();
}
/// Returns true if none of the bits are set.
bool none() const {
if (isSmall())
return getSmallBits() == 0;
return getPointer()->none();
}
/// Returns the index of the first set bit, -1 if none of the bits are set.
int find_first() const {
if (isSmall()) {
uintptr_t Bits = getSmallBits();
if (Bits == 0)
return -1;
return countTrailingZeros(Bits);
}
return getPointer()->find_first();
}
/// Returns the index of the next set bit following the "Prev" bit.
/// Returns -1 if the next set bit is not found.
int find_next(unsigned Prev) const {
if (isSmall()) {
uintptr_t Bits = getSmallBits();
// Mask off previous bits.
Bits &= ~uintptr_t(0) << (Prev + 1);
if (Bits == 0 || Prev + 1 >= getSmallSize())
return -1;
return countTrailingZeros(Bits);
}
return getPointer()->find_next(Prev);
}
/// Clear all bits.
void clear() {
if (!isSmall())
delete getPointer();
switchToSmall(0, 0);
}
/// Grow or shrink the bitvector.
void resize(unsigned N, bool t = false) {
if (!isSmall()) {
getPointer()->resize(N, t);
} else if (SmallNumDataBits >= N) {
uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
setSmallSize(N);
setSmallBits(NewBits | getSmallBits());
} else {
BitVector *BV = new BitVector(N, t);
uintptr_t OldBits = getSmallBits();
for (size_t i = 0, e = getSmallSize(); i != e; ++i)
(*BV)[i] = (OldBits >> i) & 1;
switchToLarge(BV);
}
}
void reserve(unsigned N) {
if (isSmall()) {
if (N > SmallNumDataBits) {
uintptr_t OldBits = getSmallRawBits();
size_t SmallSize = getSmallSize();
BitVector *BV = new BitVector(SmallSize);
for (size_t i = 0; i < SmallSize; ++i)
if ((OldBits >> i) & 1)
BV->set(i);
BV->reserve(N);
switchToLarge(BV);
}
} else {
getPointer()->reserve(N);
}
}
// Set, reset, flip
SmallBitVector &set() {
if (isSmall())
setSmallBits(~uintptr_t(0));
else
getPointer()->set();
return *this;
}
SmallBitVector &set(unsigned Idx) {
if (isSmall()) {
assert(Idx <= static_cast<unsigned>(
std::numeric_limits<uintptr_t>::digits) &&
"undefined behavior");
setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
}
else
getPointer()->set(Idx);
return *this;
}
/// Efficiently set a range of bits in [I, E)
SmallBitVector &set(unsigned I, unsigned E) {
assert(I <= E && "Attempted to set backwards range!");
assert(E <= size() && "Attempted to set out-of-bounds range!");
if (I == E) return *this;
if (isSmall()) {
uintptr_t EMask = ((uintptr_t)1) << E;
uintptr_t IMask = ((uintptr_t)1) << I;
uintptr_t Mask = EMask - IMask;
setSmallBits(getSmallBits() | Mask);
} else
getPointer()->set(I, E);
return *this;
}
SmallBitVector &reset() {
if (isSmall())
setSmallBits(0);
else
getPointer()->reset();
return *this;
}
SmallBitVector &reset(unsigned Idx) {
if (isSmall())
setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
else
getPointer()->reset(Idx);
return *this;
}
/// Efficiently reset a range of bits in [I, E)
SmallBitVector &reset(unsigned I, unsigned E) {
assert(I <= E && "Attempted to reset backwards range!");
assert(E <= size() && "Attempted to reset out-of-bounds range!");
if (I == E) return *this;
if (isSmall()) {
uintptr_t EMask = ((uintptr_t)1) << E;
uintptr_t IMask = ((uintptr_t)1) << I;
uintptr_t Mask = EMask - IMask;
setSmallBits(getSmallBits() & ~Mask);
} else
getPointer()->reset(I, E);
return *this;
}
SmallBitVector &flip() {
if (isSmall())
setSmallBits(~getSmallBits());
else
getPointer()->flip();
return *this;
}
SmallBitVector &flip(unsigned Idx) {
if (isSmall())
setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
else
getPointer()->flip(Idx);
return *this;
}
// No argument flip.
SmallBitVector operator~() const {
return SmallBitVector(*this).flip();
}
// Indexing.
reference operator[](unsigned Idx) {
assert(Idx < size() && "Out-of-bounds Bit access.");
return reference(*this, Idx);
}
bool operator[](unsigned Idx) const {
assert(Idx < size() && "Out-of-bounds Bit access.");
if (isSmall())
return ((getSmallBits() >> Idx) & 1) != 0;
return getPointer()->operator[](Idx);
}
bool test(unsigned Idx) const {
return (*this)[Idx];
}
/// Test if any common bits are set.
bool anyCommon(const SmallBitVector &RHS) const {
if (isSmall() && RHS.isSmall())
return (getSmallBits() & RHS.getSmallBits()) != 0;
if (!isSmall() && !RHS.isSmall())
return getPointer()->anyCommon(*RHS.getPointer());
for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
if (test(i) && RHS.test(i))
return true;
return false;
}
// Comparison operators.
bool operator==(const SmallBitVector &RHS) const {
if (size() != RHS.size())
return false;
if (isSmall())
return getSmallBits() == RHS.getSmallBits();
else
return *getPointer() == *RHS.getPointer();
}
bool operator!=(const SmallBitVector &RHS) const {
return !(*this == RHS);
}
// Intersection, union, disjoint union.
SmallBitVector &operator&=(const SmallBitVector &RHS) {
resize(std::max(size(), RHS.size()));
if (isSmall())
setSmallBits(getSmallBits() & RHS.getSmallBits());
else if (!RHS.isSmall())
getPointer()->operator&=(*RHS.getPointer());
else {
SmallBitVector Copy = RHS;
Copy.resize(size());
getPointer()->operator&=(*Copy.getPointer());
}
return *this;
}
/// Reset bits that are set in RHS. Same as *this &= ~RHS.
SmallBitVector &reset(const SmallBitVector &RHS) {
if (isSmall() && RHS.isSmall())
setSmallBits(getSmallBits() & ~RHS.getSmallBits());
else if (!isSmall() && !RHS.isSmall())
getPointer()->reset(*RHS.getPointer());
else
for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
if (RHS.test(i))
reset(i);
return *this;
}
/// Check whether (This - RHS) is nonzero. This is the same as reset(RHS)
/// followed by any().
bool test(const SmallBitVector &RHS) const {
if (isSmall() && RHS.isSmall())
return (getSmallBits() & ~RHS.getSmallBits()) != 0;
if (!isSmall() && !RHS.isSmall())
return getPointer()->test(*RHS.getPointer());
unsigned i, e;
for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
if (test(i) && !RHS.test(i))
return true;
for (e = size(); i != e; ++i)
if (test(i))
return true;
return false;
}
SmallBitVector &operator|=(const SmallBitVector &RHS) {
resize(std::max(size(), RHS.size()));
if (isSmall())
setSmallBits(getSmallBits() | RHS.getSmallBits());
else if (!RHS.isSmall())
getPointer()->operator|=(*RHS.getPointer());
else {
SmallBitVector Copy = RHS;
Copy.resize(size());
getPointer()->operator|=(*Copy.getPointer());
}
return *this;
}
SmallBitVector &operator^=(const SmallBitVector &RHS) {
resize(std::max(size(), RHS.size()));
if (isSmall())
setSmallBits(getSmallBits() ^ RHS.getSmallBits());
else if (!RHS.isSmall())
getPointer()->operator^=(*RHS.getPointer());
else {
SmallBitVector Copy = RHS;
Copy.resize(size());
getPointer()->operator^=(*Copy.getPointer());
}
return *this;
}
// Assignment operator.
const SmallBitVector &operator=(const SmallBitVector &RHS) {
if (isSmall()) {
if (RHS.isSmall())
X = RHS.X;
else
switchToLarge(new BitVector(*RHS.getPointer()));
} else {
if (!RHS.isSmall())
*getPointer() = *RHS.getPointer();
else {
delete getPointer();
X = RHS.X;
}
}
return *this;
}
const SmallBitVector &operator=(SmallBitVector &&RHS) {
if (this != &RHS) {
clear();
swap(RHS);
}
return *this;
}
void swap(SmallBitVector &RHS) {
std::swap(X, RHS.X);
}
/// Add '1' bits from Mask to this vector. Don't resize.
/// This computes "*this |= Mask".
void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<true, false>(Mask, MaskWords);
else
getPointer()->setBitsInMask(Mask, MaskWords);
}
/// Clear any bits in this vector that are set in Mask. Don't resize.
/// This computes "*this &= ~Mask".
void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<false, false>(Mask, MaskWords);
else
getPointer()->clearBitsInMask(Mask, MaskWords);
}
/// Add a bit to this vector for every '0' bit in Mask. Don't resize.
/// This computes "*this |= ~Mask".
void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<true, true>(Mask, MaskWords);
else
getPointer()->setBitsNotInMask(Mask, MaskWords);
}
/// Clear a bit in this vector for every '0' bit in Mask. Don't resize.
/// This computes "*this &= Mask".
void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<false, true>(Mask, MaskWords);
else
getPointer()->clearBitsNotInMask(Mask, MaskWords);
}
private:
template <bool AddBits, bool InvertMask>
void applyMask(const uint32_t *Mask, unsigned MaskWords) {
assert(MaskWords <= sizeof(uintptr_t) && "Mask is larger than base!");
uintptr_t M = Mask[0];
if (NumBaseBits == 64)
M |= uint64_t(Mask[1]) << 32;
if (InvertMask)
M = ~M;
if (AddBits)
setSmallBits(getSmallBits() | M);
else
setSmallBits(getSmallBits() & ~M);
}
};
inline SmallBitVector
operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) {
SmallBitVector Result(LHS);
Result &= RHS;
return Result;
}
inline SmallBitVector
operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) {
SmallBitVector Result(LHS);
Result |= RHS;
return Result;
}
inline SmallBitVector
operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
SmallBitVector Result(LHS);
Result ^= RHS;
return Result;
}
} // End llvm namespace
namespace std {
/// Implement std::swap in terms of BitVector swap.
inline void
swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
LHS.swap(RHS);
}
}
#endif
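A short usage sketch: ten bits stay in small mode, and resizing past SmallNumDataBits transparently switches to a heap-allocated BitVector.

#include "llvm/ADT/SmallBitVector.h"
#include <cassert>

int main() {
  llvm::SmallBitVector BV(10);  // all bits start out reset
  BV.set(2);
  BV.set(5, 8);                 // sets the half-open range [5, 8)
  assert(BV.count() == 4);
  assert(BV.find_first() == 2 && BV.find_next(2) == 5);
  BV.resize(100);               // spills into the 'large' representation
  assert(BV.count() == 4);      // existing bits are preserved
}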


@@ -1,406 +0,0 @@
//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SmallPtrSet class. See the doxygen comment for
// SmallPtrSetImplBase for more details on the algorithm used.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <cstddef>
#include <cstring>
#include <cstdlib>
#include <iterator>
#include <utility>
namespace llvm {
class SmallPtrSetIteratorImpl;
/// SmallPtrSetImplBase - This is the common code shared among all the
/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
/// for small and one for large sets.
///
/// Small sets use an array of pointers allocated in the SmallPtrSet object,
/// which is treated as a simple array of pointers. When a pointer is added to
/// the set, the array is scanned to see if the element already exists; if not,
/// the element is 'pushed back' onto the array. If we run out of space in the
/// array, we grow into the 'large set' case. SmallPtrSet should be used when
/// the sets are often small. In this case, no memory allocation is used and
/// only light-weight, cache-efficient scanning is needed.
///
/// Large sets use a classic exponentially-probed hash table. Empty buckets are
/// represented with an illegal pointer value (-1) to allow null pointers to be
/// inserted. Tombstones are represented with another illegal pointer value
/// (-2), to allow deletion. The hash table is resized when the table is 3/4 or
/// more full. When this happens, the table is doubled in size.
///
class SmallPtrSetImplBase {
friend class SmallPtrSetIteratorImpl;
protected:
/// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
const void **SmallArray;
/// CurArray - This is the current set of buckets. If equal to SmallArray,
/// then the set is in 'small mode'.
const void **CurArray;
/// CurArraySize - The allocated size of CurArray, always a power of two.
unsigned CurArraySize;
/// Number of elements in CurArray that contain a value or are a tombstone.
/// If small, all these elements are at the beginning of CurArray and the rest
/// is uninitialized.
unsigned NumNonEmpty;
/// Number of tombstones in CurArray.
unsigned NumTombstones;
// Helpers to copy and move construct a SmallPtrSet.
SmallPtrSetImplBase(const void **SmallStorage,
const SmallPtrSetImplBase &that);
SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
SmallPtrSetImplBase &&that);
explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
: SmallArray(SmallStorage), CurArray(SmallStorage),
CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
"Initial size must be a power of two!");
}
~SmallPtrSetImplBase() {
if (!isSmall())
free(CurArray);
}
public:
typedef unsigned size_type;
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { return size() == 0; }
size_type size() const { return NumNonEmpty - NumTombstones; }
void clear() {
// If the capacity of the array is huge, and the # elements used is small,
// shrink the array.
if (!isSmall()) {
if (size() * 4 < CurArraySize && CurArraySize > 32)
return shrink_and_clear();
// Fill the array with empty markers.
memset(CurArray, -1, CurArraySize * sizeof(void *));
}
NumNonEmpty = 0;
NumTombstones = 0;
}
protected:
static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
static void *getEmptyMarker() {
// Note that -1 is chosen to make clear() efficiently implementable with
// memset and because it's not a valid pointer value.
return reinterpret_cast<void*>(-1);
}
const void **EndPointer() const {
return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
}
/// insert_imp - The bool component of the returned pair is true if the
/// pointer was new to the set, false if it was already in the set. This is
/// hidden from the client so that the derived class can check that the right
/// type of pointer is passed in.
std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
if (isSmall()) {
// Check to see if it is already in the set.
const void **LastTombstone = nullptr;
for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
APtr != E; ++APtr) {
const void *Value = *APtr;
if (Value == Ptr)
return std::make_pair(APtr, false);
if (Value == getTombstoneMarker())
LastTombstone = APtr;
}
// Did we find any tombstone marker?
if (LastTombstone != nullptr) {
*LastTombstone = Ptr;
--NumTombstones;
return std::make_pair(LastTombstone, true);
}
// Nope, there isn't. If we stay small, just 'pushback' now.
if (NumNonEmpty < CurArraySize) {
SmallArray[NumNonEmpty++] = Ptr;
return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
}
// Otherwise, hit the big set case, which will call grow.
}
return insert_imp_big(Ptr);
}
/// erase_imp - If the set contains the specified pointer, remove it and
/// return true, otherwise return false. This is hidden from the client so
/// that the derived class can check that the right type of pointer is passed
/// in.
bool erase_imp(const void * Ptr);
bool count_imp(const void * Ptr) const {
if (isSmall()) {
// Linear search for the item.
for (const void *const *APtr = SmallArray,
*const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
if (*APtr == Ptr)
return true;
return false;
}
// Big set case.
return *FindBucketFor(Ptr) == Ptr;
}
private:
bool isSmall() const { return CurArray == SmallArray; }
std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);
const void * const *FindBucketFor(const void *Ptr) const;
void shrink_and_clear();
/// Grow - Allocate a larger backing store for the buckets and move it over.
void Grow(unsigned NewSize);
void operator=(const SmallPtrSetImplBase &RHS) = delete;
protected:
/// swap - Swaps the elements of two sets.
/// Note: This method assumes that both sets have the same small size.
void swap(SmallPtrSetImplBase &RHS);
void CopyFrom(const SmallPtrSetImplBase &RHS);
void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
private:
/// Code shared by MoveFrom() and move constructor.
void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
/// Code shared by CopyFrom() and copy constructor.
void CopyHelper(const SmallPtrSetImplBase &RHS);
};
/// SmallPtrSetIteratorImpl - This is the common base class shared between all
/// instances of SmallPtrSetIterator.
class SmallPtrSetIteratorImpl {
protected:
const void *const *Bucket;
const void *const *End;
public:
explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
: Bucket(BP), End(E) {
AdvanceIfNotValid();
}
bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
return Bucket == RHS.Bucket;
}
bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
return Bucket != RHS.Bucket;
}
protected:
/// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
/// that is. This is guaranteed to stop because the end() bucket is marked
/// valid.
void AdvanceIfNotValid() {
assert(Bucket <= End);
while (Bucket != End &&
(*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
*Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
++Bucket;
}
};
/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
template<typename PtrTy>
class SmallPtrSetIterator : public SmallPtrSetIteratorImpl {
typedef PointerLikeTypeTraits<PtrTy> PtrTraits;
public:
typedef PtrTy value_type;
typedef PtrTy reference;
typedef PtrTy pointer;
typedef std::ptrdiff_t difference_type;
typedef std::forward_iterator_tag iterator_category;
explicit SmallPtrSetIterator(const void *const *BP, const void *const *E)
: SmallPtrSetIteratorImpl(BP, E) {}
// Most methods provided by baseclass.
const PtrTy operator*() const {
assert(Bucket < End);
return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
}
inline SmallPtrSetIterator& operator++() { // Preincrement
++Bucket;
AdvanceIfNotValid();
return *this;
}
SmallPtrSetIterator operator++(int) { // Postincrement
SmallPtrSetIterator tmp = *this; ++*this; return tmp;
}
};
/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
/// power of two (which means N itself if N is already a power of two).
template<unsigned N>
struct RoundUpToPowerOfTwo;
/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
/// helper template used to implement RoundUpToPowerOfTwo.
template<unsigned N, bool isPowerTwo>
struct RoundUpToPowerOfTwoH {
enum { Val = N };
};
template<unsigned N>
struct RoundUpToPowerOfTwoH<N, false> {
enum {
// We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
// the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
};
};
template<unsigned N>
struct RoundUpToPowerOfTwo {
enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};
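// Worked example: RoundUpToPowerOfTwo<5>::Val == 8. Since 5 & 4 != 0, the
// non-power-of-two helper computes (5 | 4) + 1 == 8; 8 & 7 == 0, so the
// recursion stops there. A power of two such as 8 maps to itself.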
/// \brief A templated base class for \c SmallPtrSet which provides the
/// typesafe interface that is common across all small sizes.
///
/// This is particularly useful for passing around between interface boundaries
/// to avoid encoding a particular small size in the interface boundary.
template <typename PtrType>
class SmallPtrSetImpl : public SmallPtrSetImplBase {
typedef PointerLikeTypeTraits<PtrType> PtrTraits;
SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
protected:
// Constructors that forward to the base.
SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that)
: SmallPtrSetImplBase(SmallStorage, that) {}
SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize,
SmallPtrSetImpl &&that)
: SmallPtrSetImplBase(SmallStorage, SmallSize, std::move(that)) {}
explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize)
: SmallPtrSetImplBase(SmallStorage, SmallSize) {}
public:
typedef SmallPtrSetIterator<PtrType> iterator;
typedef SmallPtrSetIterator<PtrType> const_iterator;
/// Inserts Ptr if and only if there is no element in the container equal to
/// Ptr. The bool component of the returned pair is true if and only if the
/// insertion takes place, and the iterator component of the pair points to
/// the element equal to Ptr.
std::pair<iterator, bool> insert(PtrType Ptr) {
auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
return std::make_pair(iterator(p.first, EndPointer()), p.second);
}
/// erase - If the set contains the specified pointer, remove it and return
/// true, otherwise return false.
bool erase(PtrType Ptr) {
return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
}
/// count - Return 1 if the specified pointer is in the set, 0 otherwise.
size_type count(PtrType Ptr) const {
return count_imp(PtrTraits::getAsVoidPointer(Ptr)) ? 1 : 0;
}
template <typename IterT>
void insert(IterT I, IterT E) {
for (; I != E; ++I)
insert(*I);
}
inline iterator begin() const {
return iterator(CurArray, EndPointer());
}
inline iterator end() const {
const void *const *End = EndPointer();
return iterator(End, End);
}
};
/// SmallPtrSet - This class implements a set which is optimized for holding
/// SmallSize or less elements. This internally rounds up SmallSize to the next
/// power of two if it is not already a power of two. See the comments above
/// SmallPtrSetImplBase for details of the algorithm.
template<class PtrType, unsigned SmallSize>
class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
// In small mode SmallPtrSet uses linear search for the elements, so it is
// not a good idea to choose this value too high. You may consider using a
// DenseSet<> instead if you expect many elements in the set.
static_assert(SmallSize <= 32, "SmallSize should be small");
typedef SmallPtrSetImpl<PtrType> BaseT;
// Make sure that SmallSize is a power of two, round up if not.
enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
/// SmallStorage - Fixed size storage used in 'small mode'.
const void *SmallStorage[SmallSizePowTwo];
public:
SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
SmallPtrSet(SmallPtrSet &&that)
: BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}
template<typename It>
SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
this->insert(I, E);
}
SmallPtrSet<PtrType, SmallSize> &
operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
if (&RHS != this)
this->CopyFrom(RHS);
return *this;
}
SmallPtrSet<PtrType, SmallSize>&
operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
if (&RHS != this)
this->MoveFrom(SmallSizePowTwo, std::move(RHS));
return *this;
}
/// swap - Swaps the elements of two sets.
void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
SmallPtrSetImplBase::swap(RHS);
}
};
}
namespace std {
/// Implement std::swap in terms of SmallPtrSet swap.
template<class T, unsigned N>
inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
LHS.swap(RHS);
}
}
#endif
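A usage sketch of the pointer set: up to SmallSize pointers are stored inline and searched linearly, and insert() reports whether the pointer was new.

#include "llvm/ADT/SmallPtrSet.h"
#include <cassert>

int main() {
  int A = 0, B = 1;
  llvm::SmallPtrSet<int *, 4> S;
  auto P = S.insert(&A);
  assert(P.second);                 // &A was new to the set
  S.insert(&A);                     // duplicate; ignored
  S.insert(&B);
  assert(S.size() == 2 && S.count(&A) == 1);
  S.erase(&B);
  assert(!S.count(&B));
}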


@@ -1,136 +0,0 @@
//===- llvm/ADT/SmallSet.h - 'Normally small' sets --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SmallSet class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLSET_H
#define LLVM_ADT_SMALLSET_H
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include <set>
namespace llvm {
/// SmallSet - This maintains a set of unique values, optimizing for the case
/// when the set is small (fewer than N elements). In this case, the set can be
/// maintained with no mallocs. If the set gets large, we expand to using a
/// std::set to maintain reasonable lookup times.
///
/// Note that this set does not provide a way to iterate over members in the
/// set.
template <typename T, unsigned N, typename C = std::less<T> >
class SmallSet {
/// Use a SmallVector to hold the elements here (even though it will never
/// reach its 'large' stage) to avoid calling the default ctors of elements
/// we will never use.
SmallVector<T, N> Vector;
std::set<T, C> Set;
typedef typename SmallVector<T, N>::const_iterator VIterator;
typedef typename SmallVector<T, N>::iterator mutable_iterator;
// In small mode SmallSet uses linear search for the elements, so it is
// not a good idea to choose this value too high. You may consider using a
// DenseSet<> instead if you expect many elements in the set.
static_assert(N <= 32, "N should be small");
public:
typedef size_t size_type;
SmallSet() {}
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
return Vector.empty() && Set.empty();
}
size_type size() const {
return isSmall() ? Vector.size() : Set.size();
}
/// count - Return 1 if the element is in the set, 0 otherwise.
size_type count(const T &V) const {
if (isSmall()) {
// Since the collection is small, just do a linear search.
return vfind(V) == Vector.end() ? 0 : 1;
} else {
return Set.count(V);
}
}
/// insert - Insert an element into the set if it isn't already there.
/// Returns true if the element is inserted (it was not in the set before).
/// The first value of the returned pair is unused and provided for
/// partial compatibility with the standard library self-associative container
/// concept.
// FIXME: Add iterators that abstract over the small and large form, and then
// return those here.
std::pair<NoneType, bool> insert(const T &V) {
if (!isSmall())
return std::make_pair(None, Set.insert(V).second);
VIterator I = vfind(V);
if (I != Vector.end()) // Don't reinsert if it already exists.
return std::make_pair(None, false);
if (Vector.size() < N) {
Vector.push_back(V);
return std::make_pair(None, true);
}
// Otherwise, grow from vector to set.
while (!Vector.empty()) {
Set.insert(Vector.back());
Vector.pop_back();
}
Set.insert(V);
return std::make_pair(None, true);
}
template <typename IterT>
void insert(IterT I, IterT E) {
for (; I != E; ++I)
insert(*I);
}
bool erase(const T &V) {
if (!isSmall())
return Set.erase(V);
for (mutable_iterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
if (*I == V) {
Vector.erase(I);
return true;
}
return false;
}
void clear() {
Vector.clear();
Set.clear();
}
private:
bool isSmall() const { return Set.empty(); }
VIterator vfind(const T &V) const {
for (VIterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
if (*I == V)
return I;
return Vector.end();
}
};
/// If this set is of pointer values, transparently switch over to using
/// SmallPtrSet for performance.
template <typename PointeeType, unsigned N>
class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
} // end namespace llvm
#endif
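A sketch of the small/large transition described above: the first N unique values live in the inline vector, and the next unique insertion migrates everything into the std::set.

#include "llvm/ADT/SmallSet.h"
#include <cassert>

int main() {
  llvm::SmallSet<int, 4> S;
  for (int V : {1, 2, 2, 3})
    S.insert(V);                    // duplicates are rejected
  assert(S.size() == 3 && S.count(2) == 1);
  for (int V = 4; V <= 8; ++V)
    S.insert(V);                    // fifth unique value spills into std::set
  assert(S.size() == 8);
}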


@@ -1,297 +0,0 @@
//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SmallString class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLSTRING_H
#define LLVM_ADT_SMALLSTRING_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
namespace llvm {
/// SmallString - A SmallString is just a SmallVector with methods and accessors
/// that make it work better as a string (e.g. operator+ etc).
template<unsigned InternalLen>
class SmallString : public SmallVector<char, InternalLen> {
public:
/// Default ctor - Initialize to empty.
SmallString() {}
/// Initialize from a StringRef.
SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
/// Initialize with a range.
template<typename ItTy>
SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
// Note that in order to add new overloads for append & assign, we have to
// duplicate the inherited versions so as not to inadvertently hide them.
/// @}
/// @name String Assignment
/// @{
/// Assign from a repeated element.
void assign(size_t NumElts, char Elt) {
this->SmallVectorImpl<char>::assign(NumElts, Elt);
}
/// Assign from an iterator pair.
template<typename in_iter>
void assign(in_iter S, in_iter E) {
this->clear();
SmallVectorImpl<char>::append(S, E);
}
/// Assign from a StringRef.
void assign(StringRef RHS) {
this->clear();
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
/// Assign from a SmallVector.
void assign(const SmallVectorImpl<char> &RHS) {
this->clear();
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
/// @}
/// @name String Concatenation
/// @{
/// Append from an iterator pair.
template<typename in_iter>
void append(in_iter S, in_iter E) {
SmallVectorImpl<char>::append(S, E);
}
void append(size_t NumInputs, char Elt) {
SmallVectorImpl<char>::append(NumInputs, Elt);
}
/// Append from a StringRef.
void append(StringRef RHS) {
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
/// Append from a SmallVector.
void append(const SmallVectorImpl<char> &RHS) {
SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
}
/// @}
/// @name String Comparison
/// @{
/// Check for string equality. This is more efficient than compare() when
/// the relative ordering of unequal strings isn't needed.
bool equals(StringRef RHS) const {
return str().equals(RHS);
}
/// Check for string equality, ignoring case.
bool equals_lower(StringRef RHS) const {
return str().equals_lower(RHS);
}
/// Compare two strings; the result is -1, 0, or 1 if this string is
/// lexicographically less than, equal to, or greater than the \p RHS.
int compare(StringRef RHS) const {
return str().compare(RHS);
}
/// compare_lower - Compare two strings, ignoring case.
int compare_lower(StringRef RHS) const {
return str().compare_lower(RHS);
}
/// compare_numeric - Compare two strings, treating sequences of digits as
/// numbers.
int compare_numeric(StringRef RHS) const {
return str().compare_numeric(RHS);
}
/// @}
/// @name String Predicates
/// @{
/// startswith - Check if this string starts with the given \p Prefix.
bool startswith(StringRef Prefix) const {
return str().startswith(Prefix);
}
/// endswith - Check if this string ends with the given \p Suffix.
bool endswith(StringRef Suffix) const {
return str().endswith(Suffix);
}
/// @}
/// @name String Searching
/// @{
/// find - Search for the first character \p C in the string.
///
/// \return - The index of the first occurrence of \p C, or npos if not
/// found.
size_t find(char C, size_t From = 0) const {
return str().find(C, From);
}
/// Search for the first string \p Str in the string.
///
/// \returns The index of the first occurrence of \p Str, or npos if not
/// found.
size_t find(StringRef Str, size_t From = 0) const {
return str().find(Str, From);
}
/// Search for the last character \p C in the string.
///
/// \returns The index of the last occurrence of \p C, or npos if not
/// found.
size_t rfind(char C, size_t From = StringRef::npos) const {
return str().rfind(C, From);
}
/// Search for the last string \p Str in the string.
///
/// \returns The index of the last occurrence of \p Str, or npos if not
/// found.
size_t rfind(StringRef Str) const {
return str().rfind(Str);
}
/// Find the first character in the string that is \p C, or npos if not
/// found. Same as find.
size_t find_first_of(char C, size_t From = 0) const {
return str().find_first_of(C, From);
}
/// Find the first character in the string that is in \p Chars, or npos if
/// not found.
///
/// Complexity: O(size() + Chars.size())
size_t find_first_of(StringRef Chars, size_t From = 0) const {
return str().find_first_of(Chars, From);
}
/// Find the first character in the string that is not \p C or npos if not
/// found.
size_t find_first_not_of(char C, size_t From = 0) const {
return str().find_first_not_of(C, From);
}
/// Find the first character in the string that is not in the string
/// \p Chars, or npos if not found.
///
/// Complexity: O(size() + Chars.size())
size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
return str().find_first_not_of(Chars, From);
}
/// Find the last character in the string that is \p C, or npos if not
/// found.
size_t find_last_of(char C, size_t From = StringRef::npos) const {
return str().find_last_of(C, From);
}
/// Find the last character in the string that is in \p Chars, or npos if not
/// found.
///
/// Complexity: O(size() + Chars.size())
size_t find_last_of(
StringRef Chars, size_t From = StringRef::npos) const {
return str().find_last_of(Chars, From);
}
/// @}
/// @name Helpful Algorithms
/// @{
/// Return the number of occurrences of \p C in the string.
size_t count(char C) const {
return str().count(C);
}
/// Return the number of non-overlapped occurrences of \p Str in the
/// string.
size_t count(StringRef Str) const {
return str().count(Str);
}
/// @}
/// @name Substring Operations
/// @{
/// Return a reference to the substring from [Start, Start + N).
///
/// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
/// \param N The number of characters to include in the substring. If \p N
/// exceeds the number of characters remaining in the string, the string
/// suffix (starting with \p Start) will be returned.
StringRef substr(size_t Start, size_t N = StringRef::npos) const {
return str().substr(Start, N);
}
/// Return a reference to the substring from [Start, End).
///
/// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
/// \param End The index following the last character to include in the
/// substring. If this is npos, or less than \p Start, or exceeds the
/// number of characters remaining in the string, the string suffix
/// (starting with \p Start) will be returned.
StringRef slice(size_t Start, size_t End) const {
return str().slice(Start, End);
}
// Extra methods.
/// Explicit conversion to StringRef.
StringRef str() const { return StringRef(this->begin(), this->size()); }
// TODO: Make this const, if it's safe...
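// Appending a NUL and immediately popping it leaves the terminator in the
// buffer just past size(), so the pointer returned by data() is
// NUL-terminated without changing the logical contents.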
const char* c_str() {
this->push_back(0);
this->pop_back();
return this->data();
}
/// Implicit conversion to StringRef.
operator StringRef() const { return str(); }
// Extra operators.
const SmallString &operator=(StringRef RHS) {
this->clear();
return *this += RHS;
}
SmallString &operator+=(StringRef RHS) {
this->append(RHS.begin(), RHS.end());
return *this;
}
SmallString &operator+=(char C) {
this->push_back(C);
return *this;
}
};
}
#endif
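A usage sketch combining the mutation and query helpers above; substr() and str() hand out StringRef views of the in-place buffer rather than copies.

#include "llvm/ADT/SmallString.h"
#include <cassert>

int main() {
  llvm::SmallString<32> Path("usr");
  Path += '/';
  Path += "lib";
  assert(Path.equals("usr/lib"));
  assert(Path.startswith("usr") && Path.rfind('/') == 3);
  llvm::StringRef Dir = Path.substr(0, 3); // "usr": a view, not a copy
  assert(Dir == "usr");
}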


@@ -1,927 +0,0 @@
//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SmallVector class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <memory>
namespace llvm {
/// This is all the non-templated stuff common to all SmallVectors.
class SmallVectorBase {
protected:
void *BeginX, *EndX, *CapacityX;
protected:
SmallVectorBase(void *FirstEl, size_t Size)
: BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
/// This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize);
public:
/// This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { return BeginX == EndX; }
};
template <typename T, unsigned N> struct SmallVectorStorage;
/// This is the part of SmallVectorTemplateBase which does not depend on whether
/// the type T is a POD. The extra dummy template argument is used by ArrayRef
/// to avoid unnecessarily requiring T to be complete.
template <typename T, typename = void>
class SmallVectorTemplateCommon : public SmallVectorBase {
private:
template <typename, unsigned> friend struct SmallVectorStorage;
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. Use an array of char of sufficient alignment.
typedef llvm::AlignedCharArrayUnion<T> U;
U FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
protected:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
void grow_pod(size_t MinSizeInBytes, size_t TSize) {
SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
}
/// Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
/// Put this vector in a state of being small.
void resetToSmall() {
BeginX = EndX = CapacityX = &FirstEl;
}
void setEnd(T *P) { this->EndX = P; }
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
typedef T *iterator;
typedef const T *const_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef T &reference;
typedef const T &const_reference;
typedef T *pointer;
typedef const T *const_pointer;
// forward iterator creation methods.
LLVM_ATTRIBUTE_ALWAYS_INLINE
iterator begin() { return (iterator)this->BeginX; }
LLVM_ATTRIBUTE_ALWAYS_INLINE
const_iterator begin() const { return (const_iterator)this->BeginX; }
LLVM_ATTRIBUTE_ALWAYS_INLINE
iterator end() { return (iterator)this->EndX; }
LLVM_ATTRIBUTE_ALWAYS_INLINE
const_iterator end() const { return (const_iterator)this->EndX; }
protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
LLVM_ATTRIBUTE_ALWAYS_INLINE
size_type size() const { return end()-begin(); }
size_type max_size() const { return size_type(-1) / sizeof(T); }
/// Return the total number of elements in the currently allocated buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
/// Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
/// Return a pointer to the vector's buffer, even if empty().
const_pointer data() const { return const_pointer(begin()); }
LLVM_ATTRIBUTE_ALWAYS_INLINE
reference operator[](size_type idx) {
assert(idx < size());
return begin()[idx];
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
const_reference operator[](size_type idx) const {
assert(idx < size());
return begin()[idx];
}
reference front() {
assert(!empty());
return begin()[0];
}
const_reference front() const {
assert(!empty());
return begin()[0];
}
reference back() {
assert(!empty());
return end()[-1];
}
const_reference back() const {
assert(!empty());
return end()[-1];
}
};
/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
/// implementations that are designed to work with non-POD-like T's.
template <typename T, bool isPodLike>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
static void destroy_range(T *S, T *E) {
while (S != E) {
--E;
E->~T();
}
}
/// Move the range [I, E) into the uninitialized memory starting with "Dest",
/// constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(std::make_move_iterator(I),
std::make_move_iterator(E), Dest);
}
/// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
/// constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
/// Grow the allocated memory (without initializing new elements), doubling
/// the size of the allocated memory. Guarantees space for at least one more
/// element, or MinSize more elements if specified.
void grow(size_t MinSize = 0);
public:
void push_back(const T &Elt) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void*) this->end()) T(Elt);
this->setEnd(this->end()+1);
}
void push_back(T &&Elt) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void*) this->end()) T(::std::move(Elt));
this->setEnd(this->end()+1);
}
void pop_back() {
this->setEnd(this->end()-1);
this->end()->~T();
}
};
// Define this out-of-line to dissuade the C++ compiler from inlining it.
template <typename T, bool isPodLike>
void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t CurCapacity = this->capacity();
size_t CurSize = this->size();
// Always grow, even from zero.
size_t NewCapacity = size_t(NextPowerOf2(CurCapacity+2));
if (NewCapacity < MinSize)
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
// Move the elements over.
this->uninitialized_move(this->begin(), this->end(), NewElts);
// Destroy the original elements.
destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
free(this->begin());
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
}
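// Growth sketch: starting from an inline capacity of 0, repeated push_back
// grows the capacity as NextPowerOf2(CurCapacity + 2), i.e. 0 -> 4 -> 8 ->
// 16, unless MinSize demands more. (This assumes NextPowerOf2 returns the
// smallest power of two strictly greater than its argument.)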
/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
/// implementations that are designed to work with POD-like T's.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
protected:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
/// Move the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
// Just do a copy.
uninitialized_copy(I, E, Dest);
}
/// Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
// Arbitrary iterator types; just use the basic implementation.
std::uninitialized_copy(I, E, Dest);
}
/// Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template <typename T1, typename T2>
static void uninitialized_copy(
T1 *I, T1 *E, T2 *Dest,
typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
T2>::value>::type * = nullptr) {
// Use memcpy for PODs iterated by pointers (which includes SmallVector
// iterators): std::uninitialized_copy optimizes to memmove, but we can
// use memcpy here. Note that I and E are iterators and thus might be
// invalid for memcpy if they are equal.
if (I != E)
memcpy(Dest, I, (E - I) * sizeof(T));
}
/// Double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
public:
void push_back(const T &Elt) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
memcpy(this->end(), &Elt, sizeof(T));
this->setEnd(this->end()+1);
}
void pop_back() {
this->setEnd(this->end()-1);
}
};
/// This class consists of common code factored out of the SmallVector class to
/// reduce code duplication based on the SmallVector 'N' template parameter.
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
SmallVectorImpl(const SmallVectorImpl&) = delete;
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::const_iterator const_iterator;
typedef typename SuperClass::size_type size_type;
protected:
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
}
public:
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
free(this->begin());
}
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
}
void resize(size_type N) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
new (&*I) T();
this->setEnd(this->begin()+N);
}
}
void resize(size_type N, const T &NV) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
std::uninitialized_fill(this->end(), this->begin()+N, NV);
this->setEnd(this->begin()+N);
}
}
void reserve(size_type N) {
if (this->capacity() < N)
this->grow(N);
}
T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
T Result = ::std::move(this->back());
this->pop_back();
return Result;
}
void swap(SmallVectorImpl &RHS);
/// Add the specified range to the end of the SmallVector.
template<typename in_iter>
void append(in_iter in_start, in_iter in_end) {
size_type NumInputs = std::distance(in_start, in_end);
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
this->uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
}
/// Add the specified range to the end of the SmallVector.
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
}
void append(std::initializer_list<T> IL) {
append(IL.begin(), IL.end());
}
void assign(size_type NumElts, const T &Elt) {
clear();
if (this->capacity() < NumElts)
this->grow(NumElts);
this->setEnd(this->begin()+NumElts);
std::uninitialized_fill(this->begin(), this->end(), Elt);
}
void assign(std::initializer_list<T> IL) {
clear();
append(IL);
}
iterator erase(const_iterator CI) {
// Just cast away constness because this is a non-const member function.
iterator I = const_cast<iterator>(CI);
assert(I >= this->begin() && "Iterator to erase is out of bounds.");
assert(I < this->end() && "Erasing at past-the-end iterator.");
iterator N = I;
// Shift all elts down one.
std::move(I+1, this->end(), I);
// Drop the last elt.
this->pop_back();
return N;
}
iterator erase(const_iterator CS, const_iterator CE) {
// Just cast away constness because this is a non-const member function.
iterator S = const_cast<iterator>(CS);
iterator E = const_cast<iterator>(CE);
assert(S >= this->begin() && "Range to erase is out of bounds.");
assert(S <= E && "Trying to erase invalid range.");
assert(E <= this->end() && "Trying to erase past the end.");
iterator N = S;
// Shift all elts down.
iterator I = std::move(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
return N;
}
iterator insert(iterator I, T &&Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(::std::move(Elt));
return this->end()-1;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
}
::new ((void*) this->end()) T(::std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
// If we just moved the element we're inserting, be sure to update
// the reference.
T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = ::std::move(*EltPtr);
return I;
}
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(Elt);
return this->end()-1;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
}
::new ((void*) this->end()) T(std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
// If we just moved the element we're inserting, be sure to update
// the reference.
const T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = *EltPtr;
return I;
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->begin()+InsertElt;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
// Ensure there is enough space.
reserve(this->size() + NumToInsert);
// Recompute the iterator: reserve() may have invalidated it.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(std::move_iterator<iterator>(this->end() - NumToInsert),
std::move_iterator<iterator>(this->end()));
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
std::fill_n(I, NumToInsert, Elt);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
// Insert the non-overwritten middle part.
std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
return I;
}
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->begin()+InsertElt;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
size_t NumToInsert = std::distance(From, To);
// Ensure there is enough space.
reserve(this->size() + NumToInsert);
// Recompute the iterator: reserve() may have invalidated it.
I = this->begin()+InsertElt;
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
// reallocate the vector.
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(std::move_iterator<iterator>(this->end() - NumToInsert),
std::move_iterator<iterator>(this->end()));
// Copy the existing elements that get replaced.
std::move_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
}
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
*J = *From;
++J; ++From;
}
// Insert the non-overwritten middle part.
this->uninitialized_copy(From, To, OldEnd);
return I;
}
void insert(iterator I, std::initializer_list<T> IL) {
insert(I, IL.begin(), IL.end());
}
template <typename... ArgTypes> void emplace_back(ArgTypes &&... Args) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
this->grow();
::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
this->setEnd(this->end() + 1);
}
SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
return std::equal(this->begin(), this->end(), RHS.begin());
}
bool operator!=(const SmallVectorImpl &RHS) const {
return !(*this == RHS);
}
bool operator<(const SmallVectorImpl &RHS) const {
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
/// Set the array size to \p N, which the current array must have enough
/// capacity for.
///
/// This does not construct or destroy any elements in the vector.
///
/// Clients can use this in conjunction with capacity() to write past the end
/// of the buffer when they know that more elements are available, and only
/// update the size later. This avoids the cost of value initializing elements
/// which will only be overwritten.
void set_size(size_type N) {
assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
};
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
if (this == &RHS) return;
// We can only avoid copying elements if neither vector is small.
if (!this->isSmall() && !RHS.isSmall()) {
std::swap(this->BeginX, RHS.BeginX);
std::swap(this->EndX, RHS.EndX);
std::swap(this->CapacityX, RHS.CapacityX);
return;
}
if (RHS.size() > this->capacity())
this->grow(RHS.size());
if (this->size() > RHS.capacity())
RHS.grow(this->size());
// Swap the shared elements.
size_t NumShared = this->size();
if (NumShared > RHS.size()) NumShared = RHS.size();
for (size_type i = 0; i != NumShared; ++i)
std::swap((*this)[i], RHS[i]);
// Copy over the extra elts.
if (this->size() > RHS.size()) {
size_t EltDiff = this->size() - RHS.size();
this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
RHS.setEnd(RHS.end()+EltDiff);
this->destroy_range(this->begin()+NumShared, this->end());
this->setEnd(this->begin()+NumShared);
} else if (RHS.size() > this->size()) {
size_t EltDiff = RHS.size() - this->size();
this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
this->setEnd(this->end() + EltDiff);
this->destroy_range(RHS.begin()+NumShared, RHS.end());
RHS.setEnd(RHS.begin()+NumShared);
}
}
template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd;
if (RHSSize)
NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
else
NewEnd = this->begin();
// Destroy excess elements.
this->destroy_range(NewEnd, this->end());
// Trim.
this->setEnd(NewEnd);
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Copy construct the new elements in place.
this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
return *this;
}
template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If the RHS isn't small, clear this vector and then steal its buffer.
if (!RHS.isSmall()) {
this->destroy_range(this->begin(), this->end());
if (!this->isSmall()) free(this->begin());
this->BeginX = RHS.BeginX;
this->EndX = RHS.EndX;
this->CapacityX = RHS.CapacityX;
RHS.resetToSmall();
return *this;
}
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd = this->begin();
if (RHSSize)
NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
// Destroy excess elements and trim the bounds.
this->destroy_range(NewEnd, this->end());
this->setEnd(NewEnd);
// Clear the RHS.
RHS.clear();
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: this may not actually make any sense if we can efficiently move
// elements.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
}
// Move-construct the new elements in place.
this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
RHS.clear();
return *this;
}
/// Storage for the SmallVector elements which aren't contained in
/// SmallVectorTemplateCommon. There are 'N-1' elements here. The remaining '1'
/// element is in the base class. This is specialized for the N=1 and N=0 cases
/// to avoid allocating unnecessary storage.
template <typename T, unsigned N>
struct SmallVectorStorage {
typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
};
template <typename T> struct SmallVectorStorage<T, 1> {};
template <typename T> struct SmallVectorStorage<T, 0> {};
/// This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
/// in-place, which allows it to avoid heap allocation when the actual number of
/// elements is below that threshold. This allows normal "small" cases to be
/// fast without losing generality for large inputs.
///
/// Note that this does not attempt to be exception safe.
///
template <typename T, unsigned N>
class SmallVector : public SmallVectorImpl<T> {
/// Inline space for elements which aren't stored in the base class.
SmallVectorStorage<T, N> Storage;
public:
SmallVector() : SmallVectorImpl<T>(N) {
}
explicit SmallVector(size_t Size, const T &Value = T())
: SmallVectorImpl<T>(N) {
this->assign(Size, Value);
}
template<typename ItTy>
SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
this->append(S, E);
}
template <typename RangeTy>
explicit SmallVector(const llvm::iterator_range<RangeTy> R)
: SmallVectorImpl<T>(N) {
this->append(R.begin(), R.end());
}
SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
this->assign(IL);
}
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(RHS);
}
const SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
return *this;
}
SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
const SmallVector &operator=(SmallVector &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
const SmallVector &operator=(std::initializer_list<T> IL) {
this->assign(IL);
return *this;
}
};
template<typename T, unsigned N>
static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
return X.capacity_in_bytes();
}
} // End llvm namespace
namespace std {
/// Implement std::swap in terms of SmallVector swap.
template<typename T>
inline void
swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
LHS.swap(RHS);
}
/// Implement std::swap in terms of SmallVector swap.
template<typename T, unsigned N>
inline void
swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
LHS.swap(RHS);
}
}
#endif
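// A minimal usage sketch (illustrative only, not part of the original
// header), assuming the header is included as "llvm/ADT/SmallVector.h"; the
// function name exampleSmallVector is made up. It shows the inline-storage
// fast path and the reallocation push_back triggers once the inline
// capacity N is exceeded.
#include "llvm/ADT/SmallVector.h"

int exampleSmallVector() {
  llvm::SmallVector<int, 4> V;   // room for 4 elements without heap allocation
  for (int i = 0; i < 4; ++i)
    V.push_back(i);              // fills the inline buffer
  V.push_back(4);                // exceeds N = 4; grow() reallocates on the heap
  int Last = V.pop_back_val();   // moves the last element (4) out
  return Last + int(V.size());   // 4 + 4 == 8
}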


@ -1,904 +0,0 @@
//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector -*- C++ -*- ===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SparseBitVector class. See the doxygen comment for
// SparseBitVector for more details on the algorithm used.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SPARSEBITVECTOR_H
#define LLVM_ADT_SPARSEBITVECTOR_H
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <climits>
namespace llvm {
/// SparseBitVector is an implementation of a bitvector that is sparse by only
/// storing the elements that have non-zero bits set. In order to make this
/// fast for the most common cases, SparseBitVector is implemented as a linked
/// list of SparseBitVectorElements. We maintain a pointer to the last
/// SparseBitVectorElement accessed (in the form of a list iterator), in order
/// to make multiple in-order test/set constant time after the first one is
/// executed. Note that using vectors to store SparseBitVectorElement's does
/// not work out very well because it causes insertion in the middle to take
/// enormous amounts of time with a large amount of bits. Other structures that
/// have better worst cases for insertion in the middle (various balanced trees,
/// etc) do not perform as well in practice as a linked list with this iterator
/// kept up to date. They are also significantly more memory intensive.
template <unsigned ElementSize = 128>
struct SparseBitVectorElement
: public ilist_node<SparseBitVectorElement<ElementSize> > {
public:
typedef unsigned long BitWord;
typedef unsigned size_type;
enum {
BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT,
BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE,
BITS_PER_ELEMENT = ElementSize
};
private:
// Index of Element in terms of where first bit starts.
unsigned ElementIndex;
BitWord Bits[BITWORDS_PER_ELEMENT];
// Needed for sentinels
friend struct ilist_sentinel_traits<SparseBitVectorElement>;
SparseBitVectorElement() {
ElementIndex = ~0U;
memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
}
public:
explicit SparseBitVectorElement(unsigned Idx) {
ElementIndex = Idx;
memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
}
// Comparison.
bool operator==(const SparseBitVectorElement &RHS) const {
if (ElementIndex != RHS.ElementIndex)
return false;
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
if (Bits[i] != RHS.Bits[i])
return false;
return true;
}
bool operator!=(const SparseBitVectorElement &RHS) const {
return !(*this == RHS);
}
// Return the bits that make up word Idx in our element.
BitWord word(unsigned Idx) const {
assert (Idx < BITWORDS_PER_ELEMENT);
return Bits[Idx];
}
unsigned index() const {
return ElementIndex;
}
bool empty() const {
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
if (Bits[i])
return false;
return true;
}
void set(unsigned Idx) {
Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE);
}
bool test_and_set (unsigned Idx) {
bool old = test(Idx);
if (!old) {
set(Idx);
return true;
}
return false;
}
void reset(unsigned Idx) {
Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE));
}
bool test(unsigned Idx) const {
return Bits[Idx / BITWORD_SIZE] & (1L << (Idx % BITWORD_SIZE));
}
size_type count() const {
unsigned NumBits = 0;
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
NumBits += countPopulation(Bits[i]);
return NumBits;
}
/// find_first - Returns the index of the first set bit.
int find_first() const {
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
if (Bits[i] != 0)
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
llvm_unreachable("Illegal empty element");
}
/// find_next - Returns the index of the next set bit starting from the
/// "Curr" bit. Returns -1 if the next set bit is not found.
int find_next(unsigned Curr) const {
if (Curr >= BITS_PER_ELEMENT)
return -1;
unsigned WordPos = Curr / BITWORD_SIZE;
unsigned BitPos = Curr % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
assert (WordPos <= BITWORDS_PER_ELEMENT
&& "Word Position outside of element");
// Mask off previous bits.
Copy &= ~0UL << BitPos;
if (Copy != 0)
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
// Check subsequent words.
for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
if (Bits[i] != 0)
return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
return -1;
}
// Union this element with RHS and return true if this one changed.
bool unionWith(const SparseBitVectorElement &RHS) {
bool changed = false;
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
BitWord old = changed ? 0 : Bits[i];
Bits[i] |= RHS.Bits[i];
if (!changed && old != Bits[i])
changed = true;
}
return changed;
}
// Return true if we have any bits in common with RHS
bool intersects(const SparseBitVectorElement &RHS) const {
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
if (RHS.Bits[i] & Bits[i])
return true;
}
return false;
}
// Intersect this Element with RHS and return true if this one changed.
// BecameZero is set to true if this element became all-zero bits.
bool intersectWith(const SparseBitVectorElement &RHS,
bool &BecameZero) {
bool changed = false;
bool allzero = true;
BecameZero = false;
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
BitWord old = changed ? 0 : Bits[i];
Bits[i] &= RHS.Bits[i];
if (Bits[i] != 0)
allzero = false;
if (!changed && old != Bits[i])
changed = true;
}
BecameZero = allzero;
return changed;
}
// Intersect this Element with the complement of RHS and return true if this
// one changed. BecameZero is set to true if this element became all-zero
// bits.
bool intersectWithComplement(const SparseBitVectorElement &RHS,
bool &BecameZero) {
bool changed = false;
bool allzero = true;
BecameZero = false;
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
BitWord old = changed ? 0 : Bits[i];
Bits[i] &= ~RHS.Bits[i];
if (Bits[i] != 0)
allzero = false;
if (!changed && old != Bits[i])
changed = true;
}
BecameZero = allzero;
return changed;
}
// Three argument version of intersectWithComplement that intersects
// RHS1 & ~RHS2 into this element
void intersectWithComplement(const SparseBitVectorElement &RHS1,
const SparseBitVectorElement &RHS2,
bool &BecameZero) {
bool allzero = true;
BecameZero = false;
for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i];
if (Bits[i] != 0)
allzero = false;
}
BecameZero = allzero;
}
};
template <unsigned ElementSize>
struct ilist_traits<SparseBitVectorElement<ElementSize> >
: public ilist_default_traits<SparseBitVectorElement<ElementSize> > {
typedef SparseBitVectorElement<ElementSize> Element;
Element *createSentinel() const { return static_cast<Element *>(&Sentinel); }
static void destroySentinel(Element *) {}
Element *provideInitialHead() const { return createSentinel(); }
Element *ensureHead(Element *) const { return createSentinel(); }
static void noteHead(Element *, Element *) {}
private:
mutable ilist_half_node<Element> Sentinel;
};
template <unsigned ElementSize = 128>
class SparseBitVector {
typedef ilist<SparseBitVectorElement<ElementSize> > ElementList;
typedef typename ElementList::iterator ElementListIter;
typedef typename ElementList::const_iterator ElementListConstIter;
enum {
BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
};
// Pointer to our current Element.
ElementListIter CurrElementIter;
ElementList Elements;
// This is like std::lower_bound, except we do linear searching from the
// current position.
ElementListIter FindLowerBound(unsigned ElementIndex) {
if (Elements.empty()) {
CurrElementIter = Elements.begin();
return Elements.begin();
}
// Make sure our current iterator is valid.
if (CurrElementIter == Elements.end())
--CurrElementIter;
// Search from our current iterator, either backwards or forwards,
// depending on what element we are looking for.
ElementListIter ElementIter = CurrElementIter;
if (CurrElementIter->index() == ElementIndex) {
return ElementIter;
} else if (CurrElementIter->index() > ElementIndex) {
while (ElementIter != Elements.begin()
&& ElementIter->index() > ElementIndex)
--ElementIter;
} else {
while (ElementIter != Elements.end() &&
ElementIter->index() < ElementIndex)
++ElementIter;
}
CurrElementIter = ElementIter;
return ElementIter;
}
// Iterator to walk set bits in the bitmap. This iterator is a lot uglier
// than it would otherwise be, in order to be efficient.
class SparseBitVectorIterator {
private:
bool AtEnd;
const SparseBitVector<ElementSize> *BitVector;
// Current element inside of bitmap.
ElementListConstIter Iter;
// Current bit number inside of our bitmap.
unsigned BitNumber;
// Current word number inside of our element.
unsigned WordNumber;
// Current bits from the element.
typename SparseBitVectorElement<ElementSize>::BitWord Bits;
// Move our iterator to the first non-zero bit in the bitmap.
void AdvanceToFirstNonZero() {
if (AtEnd)
return;
if (BitVector->Elements.empty()) {
AtEnd = true;
return;
}
Iter = BitVector->Elements.begin();
BitNumber = Iter->index() * ElementSize;
unsigned BitPos = Iter->find_first();
BitNumber += BitPos;
WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
Bits = Iter->word(WordNumber);
Bits >>= BitPos % BITWORD_SIZE;
}
// Move our iterator to the next non-zero bit.
void AdvanceToNextNonZero() {
if (AtEnd)
return;
while (Bits && !(Bits & 1)) {
Bits >>= 1;
BitNumber += 1;
}
// See if we ran out of Bits in this word.
if (!Bits) {
int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize);
// If we ran out of set bits in this element, move to next element.
if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) {
++Iter;
WordNumber = 0;
// We may run out of elements in the bitmap.
if (Iter == BitVector->Elements.end()) {
AtEnd = true;
return;
}
// Set up for next non-zero word in bitmap.
BitNumber = Iter->index() * ElementSize;
NextSetBitNumber = Iter->find_first();
BitNumber += NextSetBitNumber;
WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
Bits = Iter->word(WordNumber);
Bits >>= NextSetBitNumber % BITWORD_SIZE;
} else {
WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE;
Bits = Iter->word(WordNumber);
Bits >>= NextSetBitNumber % BITWORD_SIZE;
BitNumber = Iter->index() * ElementSize;
BitNumber += NextSetBitNumber;
}
}
}
public:
// Preincrement.
inline SparseBitVectorIterator& operator++() {
++BitNumber;
Bits >>= 1;
AdvanceToNextNonZero();
return *this;
}
// Postincrement.
inline SparseBitVectorIterator operator++(int) {
SparseBitVectorIterator tmp = *this;
++*this;
return tmp;
}
// Return the current set bit number.
unsigned operator*() const {
return BitNumber;
}
bool operator==(const SparseBitVectorIterator &RHS) const {
// If they are both at the end, ignore the rest of the fields.
if (AtEnd && RHS.AtEnd)
return true;
// Otherwise they are the same if they have the same bit number and
// bitmap.
return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber;
}
bool operator!=(const SparseBitVectorIterator &RHS) const {
return !(*this == RHS);
}
SparseBitVectorIterator(): BitVector(nullptr) {
}
SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
bool end = false):BitVector(RHS) {
Iter = BitVector->Elements.begin();
BitNumber = 0;
Bits = 0;
WordNumber = ~0;
AtEnd = end;
AdvanceToFirstNonZero();
}
};
public:
typedef SparseBitVectorIterator iterator;
SparseBitVector () {
CurrElementIter = Elements.begin ();
}
~SparseBitVector() {
}
// SparseBitVector copy ctor.
SparseBitVector(const SparseBitVector &RHS) {
ElementListConstIter ElementIter = RHS.Elements.begin();
while (ElementIter != RHS.Elements.end()) {
Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
++ElementIter;
}
CurrElementIter = Elements.begin ();
}
// Clear.
void clear() {
Elements.clear();
}
// Assignment
SparseBitVector& operator=(const SparseBitVector& RHS) {
if (this == &RHS)
return *this;
Elements.clear();
ElementListConstIter ElementIter = RHS.Elements.begin();
while (ElementIter != RHS.Elements.end()) {
Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
++ElementIter;
}
CurrElementIter = Elements.begin ();
return *this;
}
// Test, Reset, and Set a bit in the bitmap.
bool test(unsigned Idx) {
if (Elements.empty())
return false;
unsigned ElementIndex = Idx / ElementSize;
ElementListIter ElementIter = FindLowerBound(ElementIndex);
// If we can't find an element that is supposed to contain this bit, there
// is nothing more to do.
if (ElementIter == Elements.end() ||
ElementIter->index() != ElementIndex)
return false;
return ElementIter->test(Idx % ElementSize);
}
void reset(unsigned Idx) {
if (Elements.empty())
return;
unsigned ElementIndex = Idx / ElementSize;
ElementListIter ElementIter = FindLowerBound(ElementIndex);
// If we can't find an element that is supposed to contain this bit, there
// is nothing more to do.
if (ElementIter == Elements.end() ||
ElementIter->index() != ElementIndex)
return;
ElementIter->reset(Idx % ElementSize);
// When the element is zeroed out, delete it.
if (ElementIter->empty()) {
++CurrElementIter;
Elements.erase(ElementIter);
}
}
void set(unsigned Idx) {
unsigned ElementIndex = Idx / ElementSize;
SparseBitVectorElement<ElementSize> *Element;
ElementListIter ElementIter;
if (Elements.empty()) {
Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
ElementIter = Elements.insert(Elements.end(), Element);
} else {
ElementIter = FindLowerBound(ElementIndex);
if (ElementIter == Elements.end() ||
ElementIter->index() != ElementIndex) {
Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
// We may have hit the beginning of our SparseBitVector, in which case,
// we may need to insert right after this element, which requires moving
// the current iterator forward one, because insert inserts before the
// given iterator.
if (ElementIter != Elements.end() &&
ElementIter->index() < ElementIndex)
ElementIter = Elements.insert(++ElementIter, Element);
else
ElementIter = Elements.insert(ElementIter, Element);
}
}
CurrElementIter = ElementIter;
ElementIter->set(Idx % ElementSize);
}
bool test_and_set (unsigned Idx) {
bool old = test(Idx);
if (!old) {
set(Idx);
return true;
}
return false;
}
bool operator!=(const SparseBitVector &RHS) const {
return !(*this == RHS);
}
bool operator==(const SparseBitVector &RHS) const {
ElementListConstIter Iter1 = Elements.begin();
ElementListConstIter Iter2 = RHS.Elements.begin();
for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end();
++Iter1, ++Iter2) {
if (*Iter1 != *Iter2)
return false;
}
return Iter1 == Elements.end() && Iter2 == RHS.Elements.end();
}
// Union our bitmap with the RHS and return true if we changed.
bool operator|=(const SparseBitVector &RHS) {
if (this == &RHS)
return false;
bool changed = false;
ElementListIter Iter1 = Elements.begin();
ElementListConstIter Iter2 = RHS.Elements.begin();
// If RHS is empty, we are done
if (RHS.Elements.empty())
return false;
while (Iter2 != RHS.Elements.end()) {
if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
Elements.insert(Iter1,
new SparseBitVectorElement<ElementSize>(*Iter2));
++Iter2;
changed = true;
} else if (Iter1->index() == Iter2->index()) {
changed |= Iter1->unionWith(*Iter2);
++Iter1;
++Iter2;
} else {
++Iter1;
}
}
CurrElementIter = Elements.begin();
return changed;
}
// Intersect our bitmap with the RHS and return true if ours changed.
bool operator&=(const SparseBitVector &RHS) {
if (this == &RHS)
return false;
bool changed = false;
ElementListIter Iter1 = Elements.begin();
ElementListConstIter Iter2 = RHS.Elements.begin();
// Check if both bitmaps are empty.
if (Elements.empty() && RHS.Elements.empty())
return false;
// Loop through, intersecting as we go, erasing elements when necessary.
while (Iter2 != RHS.Elements.end()) {
if (Iter1 == Elements.end()) {
CurrElementIter = Elements.begin();
return changed;
}
if (Iter1->index() > Iter2->index()) {
++Iter2;
} else if (Iter1->index() == Iter2->index()) {
bool BecameZero;
changed |= Iter1->intersectWith(*Iter2, BecameZero);
if (BecameZero) {
ElementListIter IterTmp = Iter1;
++Iter1;
Elements.erase(IterTmp);
} else {
++Iter1;
}
++Iter2;
} else {
ElementListIter IterTmp = Iter1;
++Iter1;
Elements.erase(IterTmp);
changed = true;
}
}
if (Iter1 != Elements.end()) {
Elements.erase(Iter1, Elements.end());
changed = true;
}
CurrElementIter = Elements.begin();
return changed;
}
// Intersect our bitmap with the complement of the RHS and return true
// if ours changed.
bool intersectWithComplement(const SparseBitVector &RHS) {
if (this == &RHS) {
if (!empty()) {
clear();
return true;
}
return false;
}
bool changed = false;
ElementListIter Iter1 = Elements.begin();
ElementListConstIter Iter2 = RHS.Elements.begin();
// If either our bitmap or RHS is empty, we are done
if (Elements.empty() || RHS.Elements.empty())
return false;
// Loop through, intersecting as we go, erasing elements when necessary.
while (Iter2 != RHS.Elements.end()) {
if (Iter1 == Elements.end()) {
CurrElementIter = Elements.begin();
return changed;
}
if (Iter1->index() > Iter2->index()) {
++Iter2;
} else if (Iter1->index() == Iter2->index()) {
bool BecameZero;
changed |= Iter1->intersectWithComplement(*Iter2, BecameZero);
if (BecameZero) {
ElementListIter IterTmp = Iter1;
++Iter1;
Elements.erase(IterTmp);
} else {
++Iter1;
}
++Iter2;
} else {
++Iter1;
}
}
CurrElementIter = Elements.begin();
return changed;
}
bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) const {
return intersectWithComplement(*RHS);
}
// Three argument version of intersectWithComplement.
// Result of RHS1 & ~RHS2 is stored into this bitmap.
void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1,
const SparseBitVector<ElementSize> &RHS2)
{
if (this == &RHS1) {
intersectWithComplement(RHS2);
return;
} else if (this == &RHS2) {
SparseBitVector RHS2Copy(RHS2);
intersectWithComplement(RHS1, RHS2Copy);
return;
}
Elements.clear();
CurrElementIter = Elements.begin();
ElementListConstIter Iter1 = RHS1.Elements.begin();
ElementListConstIter Iter2 = RHS2.Elements.begin();
// If RHS1 is empty, we are done
// If RHS2 is empty, we still have to copy RHS1
if (RHS1.Elements.empty())
return;
// Loop through, intersecting as we go, erasing elements when necessary.
while (Iter2 != RHS2.Elements.end()) {
if (Iter1 == RHS1.Elements.end())
return;
if (Iter1->index() > Iter2->index()) {
++Iter2;
} else if (Iter1->index() == Iter2->index()) {
bool BecameZero = false;
SparseBitVectorElement<ElementSize> *NewElement =
new SparseBitVectorElement<ElementSize>(Iter1->index());
NewElement->intersectWithComplement(*Iter1, *Iter2, BecameZero);
if (!BecameZero) {
Elements.push_back(NewElement);
}
else
delete NewElement;
++Iter1;
++Iter2;
} else {
SparseBitVectorElement<ElementSize> *NewElement =
new SparseBitVectorElement<ElementSize>(*Iter1);
Elements.push_back(NewElement);
++Iter1;
}
}
// Copy the remaining elements.
while (Iter1 != RHS1.Elements.end()) {
SparseBitVectorElement<ElementSize> *NewElement =
new SparseBitVectorElement<ElementSize>(*Iter1);
Elements.push_back(NewElement);
++Iter1;
}
}
void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
const SparseBitVector<ElementSize> *RHS2) {
intersectWithComplement(*RHS1, *RHS2);
}
bool intersects(const SparseBitVector<ElementSize> *RHS) const {
return intersects(*RHS);
}
// Return true if we share any bits in common with RHS
bool intersects(const SparseBitVector<ElementSize> &RHS) const {
ElementListConstIter Iter1 = Elements.begin();
ElementListConstIter Iter2 = RHS.Elements.begin();
// Check if both bitmaps are empty.
if (Elements.empty() && RHS.Elements.empty())
return false;
// Loop through, intersecting as we go, stopping when we hit bits in common.
while (Iter2 != RHS.Elements.end()) {
if (Iter1 == Elements.end())
return false;
if (Iter1->index() > Iter2->index()) {
++Iter2;
} else if (Iter1->index() == Iter2->index()) {
if (Iter1->intersects(*Iter2))
return true;
++Iter1;
++Iter2;
} else {
++Iter1;
}
}
return false;
}
// Return true iff all bits set in this SparseBitVector are
// also set in RHS.
bool contains(const SparseBitVector<ElementSize> &RHS) const {
SparseBitVector<ElementSize> Result(*this);
Result &= RHS;
return (Result == RHS);
}
// Return the first set bit in the bitmap. Return -1 if no bits are set.
int find_first() const {
if (Elements.empty())
return -1;
const SparseBitVectorElement<ElementSize> &First = *(Elements.begin());
return (First.index() * ElementSize) + First.find_first();
}
// Return true if the SparseBitVector is empty
bool empty() const {
return Elements.empty();
}
unsigned count() const {
unsigned BitCount = 0;
for (ElementListConstIter Iter = Elements.begin();
Iter != Elements.end();
++Iter)
BitCount += Iter->count();
return BitCount;
}
iterator begin() const {
return iterator(this);
}
iterator end() const {
return iterator(this, true);
}
};
// Convenience functions to allow Or and And without dereferencing in the user
// code.
template <unsigned ElementSize>
inline bool operator |=(SparseBitVector<ElementSize> &LHS,
const SparseBitVector<ElementSize> *RHS) {
return LHS |= *RHS;
}
template <unsigned ElementSize>
inline bool operator |=(SparseBitVector<ElementSize> *LHS,
const SparseBitVector<ElementSize> &RHS) {
return LHS->operator|=(RHS);
}
template <unsigned ElementSize>
inline bool operator &=(SparseBitVector<ElementSize> *LHS,
const SparseBitVector<ElementSize> &RHS) {
return LHS->operator&=(RHS);
}
template <unsigned ElementSize>
inline bool operator &=(SparseBitVector<ElementSize> &LHS,
const SparseBitVector<ElementSize> *RHS) {
return LHS &= *RHS;
}
// Convenience functions for infix union, intersection, difference operators.
template <unsigned ElementSize>
inline SparseBitVector<ElementSize>
operator|(const SparseBitVector<ElementSize> &LHS,
const SparseBitVector<ElementSize> &RHS) {
SparseBitVector<ElementSize> Result(LHS);
Result |= RHS;
return Result;
}
template <unsigned ElementSize>
inline SparseBitVector<ElementSize>
operator&(const SparseBitVector<ElementSize> &LHS,
const SparseBitVector<ElementSize> &RHS) {
SparseBitVector<ElementSize> Result(LHS);
Result &= RHS;
return Result;
}
template <unsigned ElementSize>
inline SparseBitVector<ElementSize>
operator-(const SparseBitVector<ElementSize> &LHS,
const SparseBitVector<ElementSize> &RHS) {
SparseBitVector<ElementSize> Result;
Result.intersectWithComplement(LHS, RHS);
return Result;
}
// Dump a SparseBitVector to a stream
template <unsigned ElementSize>
void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
out << "[";
typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(),
be = LHS.end();
if (bi != be) {
out << *bi;
for (++bi; bi != be; ++bi) {
out << " " << *bi;
}
}
out << "]\n";
}
} // end namespace llvm
#endif // LLVM_ADT_SPARSEBITVECTOR_H
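// A minimal usage sketch (illustrative only, not part of the original
// header), assuming the header is included as "llvm/ADT/SparseBitVector.h";
// the function name exampleSparseBitVector is made up. Far-apart bits cost
// one 128-bit SparseBitVectorElement each, not storage for the whole index
// range.
#include "llvm/ADT/SparseBitVector.h"

int exampleSparseBitVector() {
  llvm::SparseBitVector<> BV;  // default ElementSize = 128
  BV.set(5);
  BV.set(100000);              // a second element; nothing allocated in between
  BV.reset(5);                 // the now-empty element is erased from the list
  return int(BV.count()) + BV.find_first();  // 1 + 100000
}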


@ -1,518 +0,0 @@
//===--- llvm/ADT/SparseMultiSet.h - Sparse multiset ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SparseMultiSet class, which adds multiset behavior to
// the SparseSet.
//
// A sparse multiset holds a small number of objects identified by integer keys
// from a moderately sized universe. The sparse multiset uses more memory than
// other containers in order to provide faster operations. Any key can map to
// multiple values. A SparseMultiSetNode class is provided, which serves as a
// convenient base class for the contents of a SparseMultiSet.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SPARSEMULTISET_H
#define LLVM_ADT_SPARSEMULTISET_H
#include "llvm/ADT/SparseSet.h"
namespace llvm {
/// Fast multiset implementation for objects that can be identified by small
/// unsigned keys.
///
/// SparseMultiSet allocates memory proportional to the size of the key
/// universe, so it is not recommended for building composite data structures.
/// It is useful for algorithms that require a single set with fast operations.
///
/// Compared to DenseSet and DenseMap, SparseMultiSet provides constant-time
/// fast clear() and iteration as fast as a vector. The find(), insert(), and
/// erase() operations are all constant time, and typically faster than a hash
/// table.
/// The iteration order doesn't depend on numerical key values; it only depends
/// on the order of insert() and erase() operations, so iteration order is
/// insertion order. Iteration is only provided over elements of equivalent
/// keys, but iterators are bidirectional.
///
/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
/// offers constant-time clear() and size() operations as well as fast iteration
/// independent of the size of the universe.
///
/// SparseMultiSet contains a dense vector holding all the objects and a sparse
/// array holding indexes into the dense vector. Most of the memory is used by
/// the sparse array which is the size of the key universe. The SparseT template
/// parameter provides a space/speed tradeoff for sets holding many elements.
///
/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
/// sparse array uses 4 x Universe bytes.
///
/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
/// lines, but the sparse array is 4x smaller. N is the number of elements in
/// the set.
///
/// For sets that may grow to thousands of elements, SparseT should be set to
/// uint16_t or uint32_t.
///
/// Multiset behavior is provided by maintaining doubly linked lists for values
/// that are inlined in the dense vector. SparseMultiSet is a good choice when
/// one desires a growable number of entries per key, as it will retain the
/// SparseSet algorithmic properties despite being growable. Thus, it is often a
/// better choice than a SparseSet of growable containers or a vector of
/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
/// the iterators don't point to the element erased), allowing for more
/// intuitive and fast removal.
///
/// @tparam ValueT The type of objects in the set.
/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @tparam SparseT An unsigned integer type. See above.
///
template<typename ValueT,
typename KeyFunctorT = llvm::identity<unsigned>,
typename SparseT = uint8_t>
class SparseMultiSet {
static_assert(std::numeric_limits<SparseT>::is_integer &&
!std::numeric_limits<SparseT>::is_signed,
"SparseT must be an unsigned integer type");
/// The actual data that's stored, as a doubly-linked list implemented via
/// indices into the DenseVector. The doubly linked list is implemented
/// circular in Prev indices, and INVALID-terminated in Next indices. This
/// provides efficient access to list tails. These nodes can also be
/// tombstones, in which case they are actually nodes in a single-linked
/// freelist of recyclable slots.
struct SMSNode {
static const unsigned INVALID = ~0U;
ValueT Data;
unsigned Prev;
unsigned Next;
SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) { }
/// List tails have invalid Nexts.
bool isTail() const {
return Next == INVALID;
}
/// Whether this node is a tombstone node, and thus is in our freelist.
bool isTombstone() const {
return Prev == INVALID;
}
/// Since the list is circular in Prev, all non-tombstone nodes have a valid
/// Prev.
bool isValid() const { return Prev != INVALID; }
};
typedef typename KeyFunctorT::argument_type KeyT;
typedef SmallVector<SMSNode, 8> DenseT;
DenseT Dense;
SparseT *Sparse;
unsigned Universe;
KeyFunctorT KeyIndexOf;
SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
/// We have a built-in recycler for reusing tombstone slots. This recycler
/// puts a singly-linked free list into tombstone slots, allowing us quick
/// erasure, iterator preservation, and dense size.
unsigned FreelistIdx;
unsigned NumFree;
unsigned sparseIndex(const ValueT &Val) const {
assert(ValIndexOf(Val) < Universe &&
"Invalid key in set. Did object mutate?");
return ValIndexOf(Val);
}
unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
// Disable copy construction and assignment.
// This data structure is not meant to be used that way.
SparseMultiSet(const SparseMultiSet&) = delete;
SparseMultiSet &operator=(const SparseMultiSet&) = delete;
/// Whether the given entry is the head of the list. List heads' previous
/// pointers are to the tail of the list, allowing for efficient access to the
/// list tail. D must be a valid entry node.
bool isHead(const SMSNode &D) const {
assert(D.isValid() && "Invalid node for head");
return Dense[D.Prev].isTail();
}
/// Whether the given entry is a singleton entry, i.e. the only entry with
/// that key.
bool isSingleton(const SMSNode &N) const {
assert(N.isValid() && "Invalid node for singleton");
// Is N its own predecessor?
return &Dense[N.Prev] == &N;
}
/// Add in the given SMSNode. Uses a free entry in our freelist if
/// available. Returns the index of the added node.
unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
if (NumFree == 0) {
Dense.push_back(SMSNode(V, Prev, Next));
return Dense.size() - 1;
}
// Peel off a free slot
unsigned Idx = FreelistIdx;
unsigned NextFree = Dense[Idx].Next;
assert(Dense[Idx].isTombstone() && "Non-tombstone free?");
Dense[Idx] = SMSNode(V, Prev, Next);
FreelistIdx = NextFree;
--NumFree;
return Idx;
}
/// Make the current index a new tombstone. Pushes it onto the freelist.
void makeTombstone(unsigned Idx) {
Dense[Idx].Prev = SMSNode::INVALID;
Dense[Idx].Next = FreelistIdx;
FreelistIdx = Idx;
++NumFree;
}
public:
typedef ValueT value_type;
typedef ValueT &reference;
typedef const ValueT &const_reference;
typedef ValueT *pointer;
typedef const ValueT *const_pointer;
typedef unsigned size_type;
SparseMultiSet()
: Sparse(nullptr), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) {}
~SparseMultiSet() { free(Sparse); }
/// Set the universe size which determines the largest key the set can hold.
/// The universe must be sized before any elements can be added.
///
/// @param U Universe size. All object keys must be less than U.
///
void setUniverse(unsigned U) {
// It's not hard to resize the universe on a non-empty set, but it doesn't
// seem like a likely use case, so we can add that code when we need it.
assert(empty() && "Can only resize universe on an empty set");
// Hysteresis prevents needless reallocations.
if (U >= Universe/4 && U <= Universe)
return;
free(Sparse);
// The Sparse array doesn't actually need to be initialized, so malloc
// would be enough here, but that will cause tools like valgrind to
// complain about branching on uninitialized data.
Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
Universe = U;
}
/// Our iterators are iterators over the collection of objects that share a
/// key.
template<typename SMSPtrTy>
class iterator_base : public std::iterator<std::bidirectional_iterator_tag,
ValueT> {
friend class SparseMultiSet;
SMSPtrTy SMS;
unsigned Idx;
unsigned SparseIdx;
iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
: SMS(P), Idx(I), SparseIdx(SI) { }
/// Whether our iterator has fallen outside our dense vector.
bool isEnd() const {
if (Idx == SMSNode::INVALID)
return true;
assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
return false;
}
/// Whether our iterator is properly keyed, i.e. the SparseIdx is valid
bool isKeyed() const { return SparseIdx < SMS->Universe; }
unsigned Prev() const { return SMS->Dense[Idx].Prev; }
unsigned Next() const { return SMS->Dense[Idx].Next; }
void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }
public:
typedef std::iterator<std::bidirectional_iterator_tag, ValueT> super;
typedef typename super::value_type value_type;
typedef typename super::difference_type difference_type;
typedef typename super::pointer pointer;
typedef typename super::reference reference;
reference operator*() const {
assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
"Dereferencing iterator of invalid key or index");
return SMS->Dense[Idx].Data;
}
pointer operator->() const { return &operator*(); }
/// Comparison operators
bool operator==(const iterator_base &RHS) const {
// end compares equal
if (SMS == RHS.SMS && Idx == RHS.Idx) {
assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
"Same dense entry, but different keys?");
return true;
}
return false;
}
bool operator!=(const iterator_base &RHS) const {
return !operator==(RHS);
}
/// Increment and decrement operators
iterator_base &operator--() { // predecrement - Back up
assert(isKeyed() && "Decrementing an invalid iterator");
assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
"Decrementing head of list");
// If we're at the end, then issue a new find()
if (isEnd())
Idx = SMS->findIndex(SparseIdx).Prev();
else
Idx = Prev();
return *this;
}
iterator_base &operator++() { // preincrement - Advance
assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
Idx = Next();
return *this;
}
iterator_base operator--(int) { // postdecrement
iterator_base I(*this);
--*this;
return I;
}
iterator_base operator++(int) { // postincrement
iterator_base I(*this);
++*this;
return I;
}
};
typedef iterator_base<SparseMultiSet *> iterator;
typedef iterator_base<const SparseMultiSet *> const_iterator;
// Convenience types
typedef std::pair<iterator, iterator> RangePair;
/// Returns an iterator past this container. Note that such an iterator cannot
/// be decremented, but will compare equal to other end iterators.
iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
const_iterator end() const {
return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
}
/// Returns true if the set is empty.
///
/// This is not the same as BitVector::empty().
///
bool empty() const { return size() == 0; }
/// Returns the number of elements in the set.
///
/// This is not the same as BitVector::size() which returns the size of the
/// universe.
///
size_type size() const {
assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
return Dense.size() - NumFree;
}
/// Clears the set. This is a very fast constant time operation.
///
void clear() {
// Sparse does not need to be cleared, see find().
Dense.clear();
NumFree = 0;
FreelistIdx = SMSNode::INVALID;
}
/// Find an element by its index.
///
/// @param Idx A valid index to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator findIndex(unsigned Idx) {
assert(Idx < Universe && "Key out of range");
const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
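    // With the default SparseT = uint8_t, Stride is 256: Sparse[Idx] holds
    // the dense index modulo 256, so we probe i, i+256, i+512, ... until the
    // key and list-head checks below match.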
for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
const unsigned FoundIdx = sparseIndex(Dense[i]);
// Check that we're pointing at the correct entry and that it is the head
// of a valid list.
if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
return iterator(this, i, Idx);
// Stride is 0 when SparseT >= unsigned. We don't need to loop.
if (!Stride)
break;
}
return end();
}
/// Find an element by its key.
///
/// @param Key A valid key to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator find(const KeyT &Key) {
return findIndex(KeyIndexOf(Key));
}
const_iterator find(const KeyT &Key) const {
iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
}
/// Returns the number of elements identified by Key. This will be linear in
/// the number of elements of that key.
size_type count(const KeyT &Key) const {
unsigned Ret = 0;
for (const_iterator It = find(Key); It != end(); ++It)
++Ret;
return Ret;
}
/// Returns true if this set contains an element identified by Key.
bool contains(const KeyT &Key) const {
return find(Key) != end();
}
/// Return the head or tail of the subset's list for Key, or end() if Key
/// has no entries.
iterator getHead(const KeyT &Key) { return find(Key); }
iterator getTail(const KeyT &Key) {
iterator I = find(Key);
if (I != end())
I = iterator(this, I.Prev(), KeyIndexOf(Key));
return I;
}
/// The bounds of the range of items sharing Key K. First member is the head
/// of the list, and the second member is a decrementable end iterator for
/// that key.
RangePair equal_range(const KeyT &K) {
iterator B = find(K);
iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
return make_pair(B, E);
}
/// Insert a new element at the tail of the subset list. Returns an iterator
/// to the newly added entry.
iterator insert(const ValueT &Val) {
unsigned Idx = sparseIndex(Val);
iterator I = findIndex(Idx);
unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
if (I == end()) {
// Make a singleton list
Sparse[Idx] = NodeIdx;
Dense[NodeIdx].Prev = NodeIdx;
return iterator(this, NodeIdx, Idx);
}
// Stick it at the end.
unsigned HeadIdx = I.Idx;
unsigned TailIdx = I.Prev();
Dense[TailIdx].Next = NodeIdx;
Dense[HeadIdx].Prev = NodeIdx;
Dense[NodeIdx].Prev = TailIdx;
return iterator(this, NodeIdx, Idx);
}
/// Erases an existing element identified by a valid iterator.
///
/// This invalidates iterators pointing at the same entry, but erase() returns
/// an iterator pointing to the next element in the subset's list. This makes
/// it possible to erase selected elements while iterating over the subset:
///
/// tie(I, E) = Set.equal_range(Key);
/// while (I != E)
/// if (test(*I))
/// I = Set.erase(I);
/// else
/// ++I;
///
/// Note that if the last element in the subset list is erased, this will
/// return an end iterator which can be decremented to get the new tail (if it
/// exists):
///
/// tie(B, I) = Set.equal_range(Key);
/// for (bool isBegin = B == I; !isBegin; /* empty */) {
/// isBegin = (--I) == B;
/// if (test(I))
/// break;
/// I = erase(I);
/// }
iterator erase(iterator I) {
assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
"erasing invalid/end/tombstone iterator");
// First, unlink the node from its list, then recycle its dense slot by
// turning it into a tombstone.
iterator NextI = unlink(Dense[I.Idx]);
// Put in a tombstone.
makeTombstone(I.Idx);
return NextI;
}
/// Erase all elements with the given key. This invalidates all
/// iterators of that key.
void eraseAll(const KeyT &K) {
for (iterator I = find(K); I != end(); /* empty */)
I = erase(I);
}
private:
/// Unlink the node from its list. Returns the next node in the list.
iterator unlink(const SMSNode &N) {
if (isSingleton(N)) {
// Singleton is already unlinked
assert(N.Next == SMSNode::INVALID && "Singleton has next?");
return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
}
if (isHead(N)) {
// If we're the head, then update the sparse array and our next.
Sparse[sparseIndex(N)] = N.Next;
Dense[N.Next].Prev = N.Prev;
return iterator(this, N.Next, ValIndexOf(N.Data));
}
if (N.isTail()) {
// If we're the tail, then update our head and our previous.
findIndex(sparseIndex(N)).setPrev(N.Prev);
Dense[N.Prev].Next = N.Next;
// Give back an end iterator that can be decremented
iterator I(this, N.Prev, ValIndexOf(N.Data));
return ++I;
}
// Otherwise, just drop us
Dense[N.Next].Prev = N.Prev;
Dense[N.Prev].Next = N.Next;
return iterator(this, N.Next, ValIndexOf(N.Data));
}
};
} // end namespace llvm
#endif
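// A minimal usage sketch (illustrative only, not part of the original
// header), assuming the header is included as "llvm/ADT/SparseMultiSet.h";
// the function name exampleSparseMultiSet is made up. Several values live
// under one key and are counted, then erased, as a group.
#include "llvm/ADT/SparseMultiSet.h"

unsigned exampleSparseMultiSet() {
  llvm::SparseMultiSet<unsigned> Set;  // identity key functor, SparseT = uint8_t
  Set.setUniverse(64);                 // all keys must be < 64
  Set.insert(7);
  Set.insert(7);                       // multiset: the same key twice
  Set.insert(3);
  unsigned N = Set.count(7);           // == 2, linear in the entries for key 7
  Set.eraseAll(7);                     // invalidates all iterators for key 7
  return N + Set.size();               // 2 + 1 == 3
}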


@ -1,316 +0,0 @@
//===--- llvm/ADT/SparseSet.h - Sparse set ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SparseSet class derived from the version described in
// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec. 1993.
//
// A sparse set holds a small number of objects identified by integer keys from
// a moderately sized universe. The sparse set uses more memory than other
// containers in order to provide faster operations.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SPARSESET_H
#define LLVM_ADT_SPARSESET_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataTypes.h"
#include <limits>
namespace llvm {
/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
/// be uniquely converted to a small integer less than the set's universe. This
/// class allows the set to hold values that differ from the set's key type as
/// long as an index can still be derived from the value. SparseSet never
/// directly compares ValueT, only their indices, so it can map keys to
/// arbitrary values. SparseSetValTraits computes the index from the value
/// object. To compute the index from a key, SparseSet uses a separate
/// KeyFunctorT template argument.
///
/// A simple type declaration, SparseSet<Type>, handles these cases:
/// - unsigned key, identity index, identity value
/// - unsigned key, identity index, fat value providing getSparseSetIndex()
///
/// The type declaration SparseSet<Type, UnaryFunction> handles:
/// - unsigned key, remapped index, identity value (virtual registers)
/// - pointer key, pointer-derived index, identity value (node+ID)
/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
///
/// Only other, unexpected cases require specializing SparseSetValTraits.
///
/// For best results, ValueT should not require a destructor.
///
template<typename ValueT>
struct SparseSetValTraits {
static unsigned getValIndex(const ValueT &Val) {
return Val.getSparseSetIndex();
}
};
/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
/// generic implementation handles ValueT classes which either provide
/// getSparseSetIndex() or specialize SparseSetValTraits<>.
///
template<typename KeyT, typename ValueT, typename KeyFunctorT>
struct SparseSetValFunctor {
unsigned operator()(const ValueT &Val) const {
return SparseSetValTraits<ValueT>::getValIndex(Val);
}
};
/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
/// identity key/value sets.
template<typename KeyT, typename KeyFunctorT>
struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
unsigned operator()(const KeyT &Key) const {
return KeyFunctorT()(Key);
}
};
/// SparseSet - Fast set implementation for objects that can be identified by
/// small unsigned keys.
///
/// SparseSet allocates memory proportional to the size of the key universe, so
/// it is not recommended for building composite data structures. It is useful
/// for algorithms that require a single set with fast operations.
///
/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
/// clear() and iteration as fast as a vector. The find(), insert(), and
/// erase() operations are all constant time, and typically faster than a hash
/// table. The iteration order doesn't depend on numerical key values, it only
/// depends on the order of insert() and erase() operations. When no elements
/// have been erased, the iteration order is the insertion order.
///
/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
/// offers constant-time clear() and size() operations as well as fast
/// iteration independent of the size of the universe.
///
/// SparseSet contains a dense vector holding all the objects and a sparse
/// array holding indexes into the dense vector. Most of the memory is used by
/// the sparse array which is the size of the key universe. The SparseT
/// template parameter provides a space/speed tradeoff for sets holding many
/// elements.
///
/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
/// array uses 4 x Universe bytes.
///
/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
/// lines, but the sparse array is 4x smaller. N is the number of elements in
/// the set.
///
/// For sets that may grow to thousands of elements, SparseT should be set to
/// uint16_t or uint32_t.
///
/// @tparam ValueT The type of objects in the set.
/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @tparam SparseT An unsigned integer type. See above.
///
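/// A minimal usage sketch with the default template arguments:
/// \code
///   SparseSet<unsigned> Set;
///   Set.setUniverse(128);   // all keys must be < 128
///   Set.insert(5);
///   Set.insert(10);
///   if (Set.count(5))
///     Set.erase(5);
///   Set.clear();            // constant time
/// \endcode
///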
template<typename ValueT,
typename KeyFunctorT = llvm::identity<unsigned>,
typename SparseT = uint8_t>
class SparseSet {
static_assert(std::numeric_limits<SparseT>::is_integer &&
!std::numeric_limits<SparseT>::is_signed,
"SparseT must be an unsigned integer type");
typedef typename KeyFunctorT::argument_type KeyT;
typedef SmallVector<ValueT, 8> DenseT;
typedef unsigned size_type;
DenseT Dense;
SparseT *Sparse;
unsigned Universe;
KeyFunctorT KeyIndexOf;
SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
// Disable copy construction and assignment.
// This data structure is not meant to be used that way.
SparseSet(const SparseSet&) = delete;
SparseSet &operator=(const SparseSet&) = delete;
public:
typedef ValueT value_type;
typedef ValueT &reference;
typedef const ValueT &const_reference;
typedef ValueT *pointer;
typedef const ValueT *const_pointer;
SparseSet() : Sparse(nullptr), Universe(0) {}
~SparseSet() { free(Sparse); }
/// setUniverse - Set the universe size which determines the largest key the
/// set can hold. The universe must be sized before any elements can be
/// added.
///
/// @param U Universe size. All object keys must be less than U.
///
void setUniverse(unsigned U) {
// It's not hard to resize the universe on a non-empty set, but it doesn't
// seem like a likely use case, so we can add that code when we need it.
assert(empty() && "Can only resize universe on an empty set");
// Hysteresis prevents needless reallocations.
if (U >= Universe/4 && U <= Universe)
return;
free(Sparse);
// The Sparse array doesn't actually need to be initialized, so malloc
// would be enough here, but that will cause tools like valgrind to
// complain about branching on uninitialized data.
Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
Universe = U;
}
// Import trivial vector stuff from DenseT.
typedef typename DenseT::iterator iterator;
typedef typename DenseT::const_iterator const_iterator;
const_iterator begin() const { return Dense.begin(); }
const_iterator end() const { return Dense.end(); }
iterator begin() { return Dense.begin(); }
iterator end() { return Dense.end(); }
/// empty - Returns true if the set is empty.
///
/// This is not the same as BitVector::empty().
///
bool empty() const { return Dense.empty(); }
/// size - Returns the number of elements in the set.
///
/// This is not the same as BitVector::size() which returns the size of the
/// universe.
///
size_type size() const { return Dense.size(); }
/// clear - Clears the set. This is a very fast constant time operation.
///
void clear() {
// Sparse does not need to be cleared, see find().
Dense.clear();
}
/// findIndex - Find an element by its index.
///
/// @param Idx A valid index to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator findIndex(unsigned Idx) {
assert(Idx < Universe && "Key out of range");
const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
const unsigned FoundIdx = ValIndexOf(Dense[i]);
assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
if (Idx == FoundIdx)
return begin() + i;
// Stride is 0 when SparseT >= unsigned. We don't need to loop.
if (!Stride)
break;
}
return end();
}
/// find - Find an element by its key.
///
/// @param Key A valid key to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator find(const KeyT &Key) {
return findIndex(KeyIndexOf(Key));
}
const_iterator find(const KeyT &Key) const {
return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
}
/// count - Returns 1 if this set contains an element identified by Key,
/// 0 otherwise.
///
size_type count(const KeyT &Key) const {
return find(Key) == end() ? 0 : 1;
}
/// insert - Attempts to insert a new element.
///
/// If Val is successfully inserted, return (I, true), where I is an iterator
/// pointing to the newly inserted element.
///
/// If the set already contains an element with the same key as Val, return
/// (I, false), where I is an iterator pointing to the existing element.
///
/// Insertion invalidates all iterators.
///
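/// For example:
/// \code
///   std::pair<SparseSet<unsigned>::iterator, bool> R = Set.insert(7);
///   // R.second is true on the first insertion; inserting 7 again
///   // returns the existing element with R.second == false.
/// \endcode
///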
std::pair<iterator, bool> insert(const ValueT &Val) {
unsigned Idx = ValIndexOf(Val);
iterator I = findIndex(Idx);
if (I != end())
return std::make_pair(I, false);
Sparse[Idx] = size();
Dense.push_back(Val);
return std::make_pair(end() - 1, true);
}
/// array subscript - If an element already exists with this key, return it.
/// Otherwise, automatically construct a new value from Key, insert it,
/// and return the newly inserted element.
ValueT &operator[](const KeyT &Key) {
return *insert(ValueT(Key)).first;
}
ValueT pop_back_val() {
// Sparse does not need to be cleared, see find().
return Dense.pop_back_val();
}
/// erase - Erases an existing element identified by a valid iterator.
///
/// This invalidates all iterators, but erase() returns an iterator pointing
/// to the next element. This makes it possible to erase selected elements
/// while iterating over the set:
///
/// for (SparseSet::iterator I = Set.begin(); I != Set.end();)
/// if (test(*I))
/// I = Set.erase(I);
/// else
/// ++I;
///
/// Note that end() changes when elements are erased, unlike std::list.
///
iterator erase(iterator I) {
assert(unsigned(I - begin()) < size() && "Invalid iterator");
if (I != end() - 1) {
*I = Dense.back();
unsigned BackIdx = ValIndexOf(Dense.back());
assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
Sparse[BackIdx] = I - begin();
}
// This depends on SmallVector::pop_back() not invalidating iterators.
// std::vector::pop_back() doesn't give that guarantee.
Dense.pop_back();
return I;
}
/// erase - Erases an element identified by Key, if it exists.
///
/// @param Key The key identifying the element to erase.
/// @returns True when an element was erased, false if no element was found.
///
bool erase(const KeyT &Key) {
iterator I = find(Key);
if (I == end())
return false;
erase(I);
return true;
}
};
} // end namespace llvm
#endif

View File

@ -1,171 +0,0 @@
//===-- llvm/ADT/Statistic.h - Easy way to expose stats ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the 'Statistic' class, which is designed to be an easy way
// to expose various metrics from passes. These statistics are printed at the
// end of a run (from llvm_shutdown) when the -stats option is passed on the
// command line.
//
// This is useful for reporting information like the number of instructions
// simplified, optimized or removed by various transformations, like this:
//
// static Statistic NumInstsKilled("gcse", "Number of instructions killed");
//
// Later, in the code: ++NumInstsKilled;
//
// NOTE: Statistics *must* be declared as global variables.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STATISTIC_H
#define LLVM_ADT_STATISTIC_H
#include "llvm/Support/Atomic.h"
#include "llvm/Support/Compiler.h"
#include <atomic>
#include <memory>
namespace llvm {
class raw_ostream;
class raw_fd_ostream;
class Statistic {
public:
const char *DebugType;
const char *Name;
const char *Desc;
std::atomic<unsigned> Value;
bool Initialized;
unsigned getValue() const { return Value.load(std::memory_order_relaxed); }
const char *getDebugType() const { return DebugType; }
const char *getName() const { return Name; }
const char *getDesc() const { return Desc; }
/// construct - This should only be called for non-global statistics.
void construct(const char *debugtype, const char *name, const char *desc) {
DebugType = debugtype;
Name = name;
Desc = desc;
Value = 0;
Initialized = false;
}
// Allow use of this class as the value itself.
operator unsigned() const { return getValue(); }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
const Statistic &operator=(unsigned Val) {
Value.store(Val, std::memory_order_relaxed);
return init();
}
const Statistic &operator++() {
Value.fetch_add(1, std::memory_order_relaxed);
return init();
}
unsigned operator++(int) {
init();
return Value.fetch_add(1, std::memory_order_relaxed);
}
const Statistic &operator--() {
Value.fetch_sub(1, std::memory_order_relaxed);
return init();
}
unsigned operator--(int) {
init();
return Value.fetch_sub(1, std::memory_order_relaxed);
}
const Statistic &operator+=(unsigned V) {
if (V == 0)
return *this;
Value.fetch_add(V, std::memory_order_relaxed);
return init();
}
const Statistic &operator-=(unsigned V) {
if (V == 0)
return *this;
Value.fetch_sub(V, std::memory_order_relaxed);
return init();
}
#else // Statistics are disabled in release builds.
const Statistic &operator=(unsigned Val) {
return *this;
}
const Statistic &operator++() {
return *this;
}
unsigned operator++(int) {
return 0;
}
const Statistic &operator--() {
return *this;
}
unsigned operator--(int) {
return 0;
}
const Statistic &operator+=(const unsigned &V) {
return *this;
}
const Statistic &operator-=(const unsigned &V) {
return *this;
}
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
protected:
Statistic &init() {
bool tmp = Initialized;
sys::MemoryFence();
if (!tmp) RegisterStatistic();
TsanHappensAfter(this);
return *this;
}
void RegisterStatistic();
};
// STATISTIC - A macro to make definition of statistics really simple. This
// automatically passes the DEBUG_TYPE of the file into the statistic.
#define STATISTIC(VARNAME, DESC) \
static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, 0}
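//
// For example (DEBUG_TYPE must be defined by the including file; the names
// below are illustrative):
//
//   #define DEBUG_TYPE "mypass"
//   STATISTIC(NumWidgets, "Number of widgets processed");
//   ...
//   ++NumWidgets;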
/// \brief Enable the collection and printing of statistics.
void EnableStatistics();
/// \brief Check if statistics are enabled.
bool AreStatisticsEnabled();
/// \brief Return a file stream to print our output on.
std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();
/// \brief Print statistics to the file returned by CreateInfoOutputFile().
void PrintStatistics();
/// \brief Print statistics to the given output stream.
void PrintStatistics(raw_ostream &OS);
/// Print statistics in JSON format.
void PrintStatisticsJSON(raw_ostream &OS);
} // end llvm namespace
#endif // LLVM_ADT_STATISTIC_H

View File

@ -1,197 +0,0 @@
//===-- llvm/ADT/StringExtras.h - Useful string functions -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful when dealing with strings.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGEXTRAS_H
#define LLVM_ADT_STRINGEXTRAS_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <iterator>
namespace llvm {
template<typename T> class SmallVectorImpl;
/// hexdigit - Return the hexadecimal character for the
/// given number \p X (which should be less than 16).
static inline char hexdigit(unsigned X, bool LowerCase = false) {
const char HexChar = LowerCase ? 'a' : 'A';
return X < 10 ? '0' + X : HexChar + X - 10;
}
/// Construct a string ref from a boolean.
static inline StringRef toStringRef(bool B) {
return StringRef(B ? "true" : "false");
}
/// Interpret the given character \p C as a hexadecimal digit and return its
/// value.
///
/// If \p C is not a valid hex digit, -1U is returned.
static inline unsigned hexDigitValue(char C) {
if (C >= '0' && C <= '9') return C-'0';
if (C >= 'a' && C <= 'f') return C-'a'+10U;
if (C >= 'A' && C <= 'F') return C-'A'+10U;
return -1U;
}
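/// Convert X to an unpadded hexadecimal string, uppercase unless \p LowerCase
/// is set; e.g. utohexstr(48879) == "BEEF".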
static inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
char Buffer[17];
char *BufPtr = std::end(Buffer);
if (X == 0) *--BufPtr = '0';
while (X) {
unsigned char Mod = static_cast<unsigned char>(X) & 15;
*--BufPtr = hexdigit(Mod, LowerCase);
X >>= 4;
}
return std::string(BufPtr, std::end(Buffer));
}
/// Convert buffer \p Input to its hexadecimal representation.
/// The returned string is double the size of \p Input.
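/// For example, toHex(StringRef("\x12\xab", 2)) == "12AB".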
static inline std::string toHex(StringRef Input) {
static const char *const LUT = "0123456789ABCDEF";
size_t Length = Input.size();
std::string Output;
Output.reserve(2 * Length);
for (size_t i = 0; i < Length; ++i) {
const unsigned char c = Input[i];
Output.push_back(LUT[c >> 4]);
Output.push_back(LUT[c & 15]);
}
return Output;
}
static inline std::string utostr(uint64_t X, bool isNeg = false) {
char Buffer[21];
char *BufPtr = std::end(Buffer);
if (X == 0) *--BufPtr = '0'; // Handle special case...
while (X) {
*--BufPtr = '0' + char(X % 10);
X /= 10;
}
if (isNeg) *--BufPtr = '-'; // Add negative sign...
return std::string(BufPtr, std::end(Buffer));
}
static inline std::string itostr(int64_t X) {
if (X < 0)
return utostr(static_cast<uint64_t>(-X), true);
else
return utostr(static_cast<uint64_t>(X));
}
/// StrInStrNoCase - Portable version of strcasestr. Locates the first
/// occurrence of string 's2' in string 's1', ignoring case. Returns
/// the offset of s2 in s1, or npos if s2 cannot be found.
StringRef::size_type StrInStrNoCase(StringRef s1, StringRef s2);
/// getToken - This function extracts one token from source, ignoring any
/// leading characters that appear in the Delimiters string, and ending the
/// token at any of the characters that appear in the Delimiters string. If
/// there are no tokens in the source string, an empty string is returned.
/// The function returns a pair containing the extracted token and the
/// remaining tail string.
std::pair<StringRef, StringRef> getToken(StringRef Source,
StringRef Delimiters = " \t\n\v\f\r");
/// SplitString - Split up the specified string according to the specified
/// delimiters, appending the result fragments to the output list.
void SplitString(StringRef Source,
SmallVectorImpl<StringRef> &OutFragments,
StringRef Delimiters = " \t\n\v\f\r");
/// HashString - Hash function for strings.
///
/// This is the Bernstein hash function.
//
// FIXME: Investigate whether a modified bernstein hash function performs
// better: http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
// X*33+c -> X*33^c
static inline unsigned HashString(StringRef Str, unsigned Result = 0) {
for (StringRef::size_type i = 0, e = Str.size(); i != e; ++i)
Result = Result * 33 + (unsigned char)Str[i];
return Result;
}
/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
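/// For example: 1 -> "st", 2 -> "nd", 11 -> "th", 22 -> "nd", 113 -> "th".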
static inline StringRef getOrdinalSuffix(unsigned Val) {
// It is critically important that we do this perfectly for
// user-written sequences with over 100 elements.
switch (Val % 100) {
case 11:
case 12:
case 13:
return "th";
default:
switch (Val % 10) {
case 1: return "st";
case 2: return "nd";
case 3: return "rd";
default: return "th";
}
}
}
template <typename IteratorT>
inline std::string join_impl(IteratorT Begin, IteratorT End,
StringRef Separator, std::input_iterator_tag) {
std::string S;
if (Begin == End)
return S;
S += (*Begin);
while (++Begin != End) {
S += Separator;
S += (*Begin);
}
return S;
}
template <typename IteratorT>
inline std::string join_impl(IteratorT Begin, IteratorT End,
StringRef Separator, std::forward_iterator_tag) {
std::string S;
if (Begin == End)
return S;
size_t Len = (std::distance(Begin, End) - 1) * Separator.size();
for (IteratorT I = Begin; I != End; ++I)
    Len += (*I).size();
S.reserve(Len);
S += (*Begin);
while (++Begin != End) {
S += Separator;
S += (*Begin);
}
return S;
}
/// Joins the strings in the range [Begin, End), adding Separator between
/// the elements.
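///
/// For example, with std::vector<std::string> V = {"a", "b", "c"},
/// join(V.begin(), V.end(), ", ") returns "a, b, c".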
template <typename IteratorT>
inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
typedef typename std::iterator_traits<IteratorT>::iterator_category tag;
return join_impl(Begin, End, Separator, tag());
}
} // End llvm namespace
#endif

View File

@ -1,505 +0,0 @@
//===--- StringMap.h - String Hash table map interface ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the StringMap class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGMAP_H
#define LLVM_ADT_STRINGMAP_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cstring>
#include <utility>
namespace llvm {
template<typename ValueT>
class StringMapConstIterator;
template<typename ValueT>
class StringMapIterator;
template<typename ValueTy>
class StringMapEntry;
/// StringMapEntryBase - Shared base class of StringMapEntry instances.
class StringMapEntryBase {
unsigned StrLen;
public:
explicit StringMapEntryBase(unsigned Len) : StrLen(Len) {}
unsigned getKeyLength() const { return StrLen; }
};
/// StringMapImpl - This is the base class of StringMap that is shared among
/// all of its instantiations.
class StringMapImpl {
protected:
// Array of NumBuckets pointers to entries, null pointers are holes.
// TheTable[NumBuckets] contains a sentinel value for easy iteration. It is
// followed by an array of the actual hash values as unsigned integers.
StringMapEntryBase **TheTable;
unsigned NumBuckets;
unsigned NumItems;
unsigned NumTombstones;
unsigned ItemSize;
protected:
explicit StringMapImpl(unsigned itemSize)
: TheTable(nullptr),
// Initialize the map with zero buckets to defer allocation.
NumBuckets(0), NumItems(0), NumTombstones(0), ItemSize(itemSize) {}
StringMapImpl(StringMapImpl &&RHS)
: TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
ItemSize(RHS.ItemSize) {
RHS.TheTable = nullptr;
RHS.NumBuckets = 0;
RHS.NumItems = 0;
RHS.NumTombstones = 0;
}
StringMapImpl(unsigned InitSize, unsigned ItemSize);
unsigned RehashTable(unsigned BucketNo = 0);
/// LookupBucketFor - Look up the bucket that the specified string should end
/// up in. If it already exists as a key in the map, the Item pointer for the
/// specified bucket will be non-null. Otherwise, it will be null. In either
/// case, the FullHashValue field of the bucket will be set to the hash value
/// of the string.
unsigned LookupBucketFor(StringRef Key);
/// FindKey - Look up the bucket that contains the specified key. If it exists
/// in the map, return the bucket number of the key. Otherwise return -1.
/// This does not modify the map.
int FindKey(StringRef Key) const;
/// RemoveKey - Remove the specified StringMapEntry from the table, but do not
/// delete it. This aborts if the value isn't in the table.
void RemoveKey(StringMapEntryBase *V);
/// RemoveKey - Remove the StringMapEntry for the specified key from the
/// table, returning it. If the key is not in the table, this returns null.
StringMapEntryBase *RemoveKey(StringRef Key);
/// Allocate the table with the specified number of buckets and otherwise
/// setup the map as empty.
void init(unsigned Size);
public:
static StringMapEntryBase *getTombstoneVal() {
uintptr_t Val = static_cast<uintptr_t>(-1);
Val <<= PointerLikeTypeTraits<StringMapEntryBase *>::NumLowBitsAvailable;
return reinterpret_cast<StringMapEntryBase *>(Val);
}
unsigned getNumBuckets() const { return NumBuckets; }
unsigned getNumItems() const { return NumItems; }
bool empty() const { return NumItems == 0; }
unsigned size() const { return NumItems; }
void swap(StringMapImpl &Other) {
std::swap(TheTable, Other.TheTable);
std::swap(NumBuckets, Other.NumBuckets);
std::swap(NumItems, Other.NumItems);
std::swap(NumTombstones, Other.NumTombstones);
}
};
/// StringMapEntry - This is used to represent one value that is inserted into
/// a StringMap. It contains the Value itself and the key: the string length
/// and data.
template<typename ValueTy>
class StringMapEntry : public StringMapEntryBase {
StringMapEntry(StringMapEntry &E) = delete;
public:
ValueTy second;
explicit StringMapEntry(unsigned strLen)
: StringMapEntryBase(strLen), second() {}
template <typename... InitTy>
StringMapEntry(unsigned strLen, InitTy &&... InitVals)
: StringMapEntryBase(strLen), second(std::forward<InitTy>(InitVals)...) {}
StringRef getKey() const {
return StringRef(getKeyData(), getKeyLength());
}
const ValueTy &getValue() const { return second; }
ValueTy &getValue() { return second; }
void setValue(const ValueTy &V) { second = V; }
/// getKeyData - Return the start of the string data that is the key for this
/// value. The string data is always stored immediately after the
/// StringMapEntry object.
const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);}
StringRef first() const { return StringRef(getKeyData(), getKeyLength()); }
/// Create a StringMapEntry for the specified key and construct the value
/// using \p InitVals.
template <typename AllocatorTy, typename... InitTy>
static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator,
InitTy &&... InitVals) {
unsigned KeyLength = Key.size();
// Allocate a new item with space for the string at the end and a null
// terminator.
unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
KeyLength+1;
unsigned Alignment = alignOf<StringMapEntry>();
StringMapEntry *NewItem =
static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
// Construct the value.
new (NewItem) StringMapEntry(KeyLength, std::forward<InitTy>(InitVals)...);
// Copy the string information.
char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
if (KeyLength > 0)
memcpy(StrBuffer, Key.data(), KeyLength);
StrBuffer[KeyLength] = 0; // Null terminate for convenience of clients.
return NewItem;
}
/// Create - Create a StringMapEntry with normal malloc/free.
template <typename... InitType>
static StringMapEntry *Create(StringRef Key, InitType &&... InitVal) {
MallocAllocator A;
return Create(Key, A, std::forward<InitType>(InitVal)...);
}
static StringMapEntry *Create(StringRef Key) {
return Create(Key, ValueTy());
}
/// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
/// into a StringMapEntry, return the StringMapEntry itself.
static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
return *reinterpret_cast<StringMapEntry*>(Ptr);
}
/// Destroy - Destroy this StringMapEntry, releasing memory back to the
/// specified allocator.
template<typename AllocatorTy>
void Destroy(AllocatorTy &Allocator) {
// Free memory referenced by the item.
unsigned AllocSize =
static_cast<unsigned>(sizeof(StringMapEntry)) + getKeyLength() + 1;
this->~StringMapEntry();
Allocator.Deallocate(static_cast<void *>(this), AllocSize);
}
/// Destroy this object, releasing memory back to the malloc allocator.
void Destroy() {
MallocAllocator A;
Destroy(A);
}
};
/// StringMap - This is an unconventional map that is specialized for handling
/// keys that are "strings", which are basically ranges of bytes. This does some
/// funky memory allocation and hashing things to make it extremely efficient,
/// storing the string data *after* the value in the map.
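///
/// A minimal usage sketch:
/// \code
///   StringMap<int> Counts;
///   Counts["apple"] += 1;                      // default-construct, then bump
///   bool Inserted = Counts.insert({"pear", 3}).second;
///   StringMap<int>::iterator It = Counts.find("apple");
///   if (It != Counts.end())
///     It->second = 2;
/// \endcode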
template<typename ValueTy, typename AllocatorTy = MallocAllocator>
class StringMap : public StringMapImpl {
AllocatorTy Allocator;
public:
typedef StringMapEntry<ValueTy> MapEntryTy;
StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(unsigned InitialSize)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(AllocatorTy A)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
StringMap(unsigned InitialSize, AllocatorTy A)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
Allocator(A) {}
StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
: StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) {
for (const auto &P : List) {
insert(P);
}
}
StringMap(StringMap &&RHS)
: StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
StringMap &operator=(StringMap RHS) {
StringMapImpl::swap(RHS);
std::swap(Allocator, RHS.Allocator);
return *this;
}
StringMap(const StringMap &RHS) :
StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
Allocator(RHS.Allocator) {
if (RHS.empty())
return;
// Allocate TheTable of the same size as RHS's TheTable, and set the
// sentinel appropriately (and NumBuckets).
init(RHS.NumBuckets);
unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1),
*RHSHashTable = (unsigned *)(RHS.TheTable + NumBuckets + 1);
NumItems = RHS.NumItems;
NumTombstones = RHS.NumTombstones;
for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
StringMapEntryBase *Bucket = RHS.TheTable[I];
if (!Bucket || Bucket == getTombstoneVal()) {
TheTable[I] = Bucket;
continue;
}
TheTable[I] = MapEntryTy::Create(
static_cast<MapEntryTy *>(Bucket)->getKey(), Allocator,
static_cast<MapEntryTy *>(Bucket)->getValue());
HashTable[I] = RHSHashTable[I];
}
// Note that here we've copied everything from the RHS into this object,
// tombstones included. We could, instead, have re-probed for each key to
// instantiate this new object without any tombstone buckets. The
// assumption here is that items are rarely deleted from most StringMaps,
// and so tombstones are rare, so the cost of re-probing for all inputs is
// not worthwhile.
}
AllocatorTy &getAllocator() { return Allocator; }
const AllocatorTy &getAllocator() const { return Allocator; }
typedef const char* key_type;
typedef ValueTy mapped_type;
typedef StringMapEntry<ValueTy> value_type;
typedef size_t size_type;
typedef StringMapConstIterator<ValueTy> const_iterator;
typedef StringMapIterator<ValueTy> iterator;
iterator begin() {
return iterator(TheTable, NumBuckets == 0);
}
iterator end() {
return iterator(TheTable+NumBuckets, true);
}
const_iterator begin() const {
return const_iterator(TheTable, NumBuckets == 0);
}
const_iterator end() const {
return const_iterator(TheTable+NumBuckets, true);
}
iterator find(StringRef Key) {
int Bucket = FindKey(Key);
if (Bucket == -1) return end();
return iterator(TheTable+Bucket, true);
}
const_iterator find(StringRef Key) const {
int Bucket = FindKey(Key);
if (Bucket == -1) return end();
return const_iterator(TheTable+Bucket, true);
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueTy lookup(StringRef Key) const {
const_iterator it = find(Key);
if (it != end())
return it->second;
return ValueTy();
}
/// Lookup the ValueTy for the \p Key, or create a default constructed value
/// if the key is not in the map.
ValueTy &operator[](StringRef Key) {
return emplace_second(Key).first->second;
}
/// count - Return 1 if the element is in the map, 0 otherwise.
size_type count(StringRef Key) const {
return find(Key) == end() ? 0 : 1;
}
/// insert - Insert the specified key/value pair into the map. If the key
/// already exists in the map, return false and ignore the request, otherwise
/// insert it and return true.
bool insert(MapEntryTy *KeyValue) {
unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
StringMapEntryBase *&Bucket = TheTable[BucketNo];
if (Bucket && Bucket != getTombstoneVal())
return false; // Already exists in map.
if (Bucket == getTombstoneVal())
--NumTombstones;
Bucket = KeyValue;
++NumItems;
assert(NumItems + NumTombstones <= NumBuckets);
RehashTable();
return true;
}
/// insert - Inserts the specified key/value pair into the map if the key
/// isn't already in the map. The bool component of the returned pair is true
/// if and only if the insertion takes place, and the iterator component of
/// the pair points to the element with key equivalent to the key of the pair.
std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
return emplace_second(KV.first, std::move(KV.second));
}
/// Emplace a new element for the specified key into the map if the key isn't
/// already in the map. The bool component of the returned pair is true
/// if and only if the insertion takes place, and the iterator component of
/// the pair points to the element with key equivalent to the key of the pair.
template <typename... ArgsTy>
std::pair<iterator, bool> emplace_second(StringRef Key, ArgsTy &&... Args) {
unsigned BucketNo = LookupBucketFor(Key);
StringMapEntryBase *&Bucket = TheTable[BucketNo];
if (Bucket && Bucket != getTombstoneVal())
return std::make_pair(iterator(TheTable + BucketNo, false),
false); // Already exists in map.
if (Bucket == getTombstoneVal())
--NumTombstones;
Bucket = MapEntryTy::Create(Key, Allocator, std::forward<ArgsTy>(Args)...);
++NumItems;
assert(NumItems + NumTombstones <= NumBuckets);
BucketNo = RehashTable(BucketNo);
return std::make_pair(iterator(TheTable + BucketNo, false), true);
}
// clear - Empties out the StringMap
void clear() {
if (empty()) return;
// Zap all values, resetting the keys back to non-present (not tombstone),
// which is safe because we're removing all elements.
for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
StringMapEntryBase *&Bucket = TheTable[I];
if (Bucket && Bucket != getTombstoneVal()) {
static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
}
Bucket = nullptr;
}
NumItems = 0;
NumTombstones = 0;
}
/// remove - Remove the specified key/value pair from the map, but do not
/// erase it. This aborts if the key is not in the map.
void remove(MapEntryTy *KeyValue) {
RemoveKey(KeyValue);
}
void erase(iterator I) {
MapEntryTy &V = *I;
remove(&V);
V.Destroy(Allocator);
}
bool erase(StringRef Key) {
iterator I = find(Key);
if (I == end()) return false;
erase(I);
return true;
}
~StringMap() {
// Delete all the elements in the map, but don't reset the elements
// to default values. This is a copy of clear(), but avoids unnecessary
// work not required in the destructor.
if (!empty()) {
for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
StringMapEntryBase *Bucket = TheTable[I];
if (Bucket && Bucket != getTombstoneVal()) {
static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
}
}
}
free(TheTable);
}
};
template <typename ValueTy> class StringMapConstIterator {
protected:
StringMapEntryBase **Ptr;
public:
typedef StringMapEntry<ValueTy> value_type;
StringMapConstIterator() : Ptr(nullptr) { }
explicit StringMapConstIterator(StringMapEntryBase **Bucket,
bool NoAdvance = false)
: Ptr(Bucket) {
if (!NoAdvance) AdvancePastEmptyBuckets();
}
const value_type &operator*() const {
return *static_cast<StringMapEntry<ValueTy>*>(*Ptr);
}
const value_type *operator->() const {
return static_cast<StringMapEntry<ValueTy>*>(*Ptr);
}
bool operator==(const StringMapConstIterator &RHS) const {
return Ptr == RHS.Ptr;
}
bool operator!=(const StringMapConstIterator &RHS) const {
return Ptr != RHS.Ptr;
}
inline StringMapConstIterator& operator++() { // Preincrement
++Ptr;
AdvancePastEmptyBuckets();
return *this;
}
StringMapConstIterator operator++(int) { // Postincrement
StringMapConstIterator tmp = *this; ++*this; return tmp;
}
private:
void AdvancePastEmptyBuckets() {
while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
++Ptr;
}
};
template<typename ValueTy>
class StringMapIterator : public StringMapConstIterator<ValueTy> {
public:
StringMapIterator() {}
explicit StringMapIterator(StringMapEntryBase **Bucket,
bool NoAdvance = false)
: StringMapConstIterator<ValueTy>(Bucket, NoAdvance) {
}
StringMapEntry<ValueTy> &operator*() const {
return *static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
}
StringMapEntry<ValueTy> *operator->() const {
return static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
}
};
}
#endif

View File

@ -1,632 +0,0 @@
//===--- StringRef.h - Constant String Reference Wrapper --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGREF_H
#define LLVM_ADT_STRINGREF_H
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <string>
#include <utility>
namespace llvm {
template <typename T>
class SmallVectorImpl;
class APInt;
class hash_code;
class StringRef;
/// Helper functions for StringRef::getAsInteger.
bool getAsUnsignedInteger(StringRef Str, unsigned Radix,
unsigned long long &Result);
bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);
/// StringRef - Represent a constant reference to a string, i.e. a character
/// array and a length, which need not be null terminated.
///
/// This class does not own the string data, it is expected to be used in
/// situations where the character data resides in some other buffer, whose
/// lifetime extends past that of the StringRef. For this reason, it is not in
/// general safe to store a StringRef.
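///
/// A minimal usage sketch:
/// \code
///   StringRef S("  key=value  ");
///   StringRef T = S.trim();                    // "key=value"
///   std::pair<StringRef, StringRef> KV = T.split('=');
///   bool IsKey = KV.first.equals("key");       // true
/// \endcode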
class StringRef {
public:
typedef const char *iterator;
typedef const char *const_iterator;
static const size_t npos = ~size_t(0);
typedef size_t size_type;
private:
/// The start of the string, in an external buffer.
const char *Data;
/// The length of the string.
size_t Length;
// Work around the undefined behavior of memcmp when handed null pointers
// by providing a specialized version.
LLVM_ATTRIBUTE_ALWAYS_INLINE
static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
if (Length == 0) { return 0; }
return ::memcmp(Lhs,Rhs,Length);
}
public:
/// @name Constructors
/// @{
/// Construct an empty string ref.
/*implicit*/ StringRef() : Data(nullptr), Length(0) {}
/// Construct a string ref from a cstring.
/*implicit*/ StringRef(const char *Str)
: Data(Str) {
assert(Str && "StringRef cannot be built from a NULL argument");
Length = ::strlen(Str); // invoking strlen(NULL) is undefined behavior
}
/// Construct a string ref from a pointer and length.
LLVM_ATTRIBUTE_ALWAYS_INLINE
/*implicit*/ StringRef(const char *data, size_t length)
: Data(data), Length(length) {
assert((data || length == 0) &&
"StringRef cannot be built from a NULL argument with non-null length");
}
/// Construct a string ref from an std::string.
LLVM_ATTRIBUTE_ALWAYS_INLINE
/*implicit*/ StringRef(const std::string &Str)
: Data(Str.data()), Length(Str.length()) {}
/// @}
/// @name Iterators
/// @{
iterator begin() const { return Data; }
iterator end() const { return Data + Length; }
const unsigned char *bytes_begin() const {
return reinterpret_cast<const unsigned char *>(begin());
}
const unsigned char *bytes_end() const {
return reinterpret_cast<const unsigned char *>(end());
}
iterator_range<const unsigned char *> bytes() const {
return make_range(bytes_begin(), bytes_end());
}
/// @}
/// @name String Operations
/// @{
/// data - Get a pointer to the start of the string (which may not be null
/// terminated).
LLVM_ATTRIBUTE_ALWAYS_INLINE
const char *data() const { return Data; }
/// empty - Check if the string is empty.
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool empty() const { return Length == 0; }
/// size - Get the string size.
LLVM_ATTRIBUTE_ALWAYS_INLINE
size_t size() const { return Length; }
/// front - Get the first character in the string.
char front() const {
assert(!empty());
return Data[0];
}
/// back - Get the last character in the string.
char back() const {
assert(!empty());
return Data[Length-1];
}
/// copy - Allocate a copy in Allocator and return a StringRef to it.
template <typename Allocator> StringRef copy(Allocator &A) const {
// Don't request a length 0 copy from the allocator.
if (empty())
return StringRef();
char *S = A.template Allocate<char>(Length);
std::copy(begin(), end(), S);
return StringRef(S, Length);
}
/// equals - Check for string equality, this is more efficient than
/// compare() when the relative ordering of inequal strings isn't needed.
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool equals(StringRef RHS) const {
return (Length == RHS.Length &&
compareMemory(Data, RHS.Data, RHS.Length) == 0);
}
/// equals_lower - Check for string equality, ignoring case.
bool equals_lower(StringRef RHS) const {
return Length == RHS.Length && compare_lower(RHS) == 0;
}
/// compare - Compare two strings; the result is -1, 0, or 1 if this string
/// is lexicographically less than, equal to, or greater than the \p RHS.
LLVM_ATTRIBUTE_ALWAYS_INLINE
int compare(StringRef RHS) const {
// Check the prefix for a mismatch.
if (int Res = compareMemory(Data, RHS.Data, std::min(Length, RHS.Length)))
return Res < 0 ? -1 : 1;
// Otherwise the prefixes match, so we only need to check the lengths.
if (Length == RHS.Length)
return 0;
return Length < RHS.Length ? -1 : 1;
}
/// compare_lower - Compare two strings, ignoring case.
int compare_lower(StringRef RHS) const;
/// compare_numeric - Compare two strings, treating sequences of digits as
/// numbers.
int compare_numeric(StringRef RHS) const;
/// \brief Determine the edit distance between this string and another
/// string.
///
/// \param Other the string to compare this string against.
///
/// \param AllowReplacements whether to allow character
/// replacements (change one character into another) as a single
/// operation, rather than as two operations (an insertion and a
/// removal).
///
/// \param MaxEditDistance If non-zero, the maximum edit distance that
/// this routine is allowed to compute. If the edit distance will exceed
/// that maximum, returns \c MaxEditDistance+1.
///
/// \returns the minimum number of character insertions, removals,
/// or (if \p AllowReplacements is \c true) replacements needed to
/// transform one of the given strings into the other. If zero,
/// the strings are identical.
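///
/// For example, StringRef("flaw").edit_distance("lawn") == 2
/// (delete 'f', append 'n').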
unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
unsigned MaxEditDistance = 0) const;
/// str - Get the contents as an std::string.
std::string str() const {
if (!Data) return std::string();
return std::string(Data, Length);
}
/// @}
/// @name Operator Overloads
/// @{
char operator[](size_t Index) const {
assert(Index < Length && "Invalid index!");
return Data[Index];
}
/// @}
/// @name Type Conversions
/// @{
operator std::string() const {
return str();
}
/// @}
/// @name String Predicates
/// @{
/// Check if this string starts with the given \p Prefix.
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool startswith(StringRef Prefix) const {
return Length >= Prefix.Length &&
compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
}
/// Check if this string starts with the given \p Prefix, ignoring case.
bool startswith_lower(StringRef Prefix) const;
/// Check if this string ends with the given \p Suffix.
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool endswith(StringRef Suffix) const {
return Length >= Suffix.Length &&
compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
}
/// Check if this string ends with the given \p Suffix, ignoring case.
bool endswith_lower(StringRef Suffix) const;
/// @}
/// @name String Searching
/// @{
/// Search for the first character \p C in the string.
///
/// \returns The index of the first occurrence of \p C, or npos if not
/// found.
LLVM_ATTRIBUTE_ALWAYS_INLINE
size_t find(char C, size_t From = 0) const {
size_t FindBegin = std::min(From, Length);
if (FindBegin < Length) { // Avoid calling memchr with nullptr.
// Just forward to memchr, which is faster than a hand-rolled loop.
if (const void *P = ::memchr(Data + FindBegin, C, Length - FindBegin))
return static_cast<const char *>(P) - Data;
}
return npos;
}
/// Search for the first string \p Str in the string.
///
/// \returns The index of the first occurrence of \p Str, or npos if not
/// found.
size_t find(StringRef Str, size_t From = 0) const;
/// Search for the last character \p C in the string.
///
/// \returns The index of the last occurrence of \p C, or npos if not
/// found.
size_t rfind(char C, size_t From = npos) const {
From = std::min(From, Length);
size_t i = From;
while (i != 0) {
--i;
if (Data[i] == C)
return i;
}
return npos;
}
/// Search for the last string \p Str in the string.
///
/// \returns The index of the last occurrence of \p Str, or npos if not
/// found.
size_t rfind(StringRef Str) const;
/// Find the first character in the string that is \p C, or npos if not
/// found. Same as find.
size_t find_first_of(char C, size_t From = 0) const {
return find(C, From);
}
/// Find the first character in the string that is in \p Chars, or npos if
/// not found.
///
/// Complexity: O(size() + Chars.size())
size_t find_first_of(StringRef Chars, size_t From = 0) const;
/// Find the first character in the string that is not \p C or npos if not
/// found.
size_t find_first_not_of(char C, size_t From = 0) const;
/// Find the first character in the string that is not in the string
/// \p Chars, or npos if not found.
///
/// Complexity: O(size() + Chars.size())
size_t find_first_not_of(StringRef Chars, size_t From = 0) const;
/// Find the last character in the string that is \p C, or npos if not
/// found.
size_t find_last_of(char C, size_t From = npos) const {
return rfind(C, From);
}
/// Find the last character in the string that is in \p C, or npos if not
/// found.
///
/// Complexity: O(size() + Chars.size())
size_t find_last_of(StringRef Chars, size_t From = npos) const;
/// Find the last character in the string that is not \p C, or npos if not
/// found.
size_t find_last_not_of(char C, size_t From = npos) const;
/// Find the last character in the string that is not in \p Chars, or
/// npos if not found.
///
/// Complexity: O(size() + Chars.size())
size_t find_last_not_of(StringRef Chars, size_t From = npos) const;
/// @}
/// @name Helpful Algorithms
/// @{
/// Return the number of occurrences of \p C in the string.
size_t count(char C) const {
size_t Count = 0;
for (size_t i = 0, e = Length; i != e; ++i)
if (Data[i] == C)
++Count;
return Count;
}
/// Return the number of non-overlapped occurrences of \p Str in
/// the string.
size_t count(StringRef Str) const;
/// Parse the current string as an integer of the specified radix. If
/// \p Radix is specified as zero, this does radix autosensing using
/// extended C rules: 0 is octal, 0x is hex, 0b is binary.
///
/// If the string is invalid or if only a subset of the string is valid,
/// this returns true to signify the error. The string is considered
/// erroneous if empty or if it overflows T.
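///
/// For example (radix 0 autosenses the "0x" prefix):
/// \code
///   unsigned V;
///   bool Err = StringRef("0x1f").getAsInteger(0, V); // Err == false, V == 31
/// \endcode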
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
getAsInteger(unsigned Radix, T &Result) const {
long long LLVal;
if (getAsSignedInteger(*this, Radix, LLVal) ||
static_cast<T>(LLVal) != LLVal)
return true;
Result = LLVal;
return false;
}
template <typename T>
typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
getAsInteger(unsigned Radix, T &Result) const {
unsigned long long ULLVal;
// The additional cast to unsigned long long is required to avoid the
// Visual C++ warning C4805: '!=' : unsafe mix of type 'bool' and type
// 'unsigned __int64' when instantiating getAsInteger with T = bool.
if (getAsUnsignedInteger(*this, Radix, ULLVal) ||
static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
return true;
Result = ULLVal;
return false;
}
/// Parse the current string as an integer of the specified \p Radix, or of
/// an autosensed radix if the \p Radix given is 0. The current value in
/// \p Result is discarded, and the storage is changed to be wide enough to
/// store the parsed integer.
///
/// \returns true if the string does not solely consist of a valid
/// non-empty number in the appropriate base.
///
/// APInt::fromString is superficially similar but assumes the
/// string is well-formed in the given radix.
bool getAsInteger(unsigned Radix, APInt &Result) const;
/// @}
/// @name String Operations
/// @{
/// Convert the given ASCII string to lowercase.
std::string lower() const;
/// Convert the given ASCII string to uppercase.
std::string upper() const;
/// @}
/// @name Substring Operations
/// @{
/// Return a reference to the substring from [Start, Start + N).
///
/// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
/// \param N The number of characters to include in the substring. If N
/// exceeds the number of characters remaining in the string, the string
/// suffix (starting with \p Start) will be returned.
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef substr(size_t Start, size_t N = npos) const {
Start = std::min(Start, Length);
return StringRef(Data + Start, std::min(N, Length - Start));
}
/// Return a StringRef equal to 'this' but with the first \p N elements
/// dropped.
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef drop_front(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return substr(N);
}
/// Return a StringRef equal to 'this' but with the last \p N elements
/// dropped.
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef drop_back(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return substr(0, size()-N);
}
/// Return a reference to the substring from [Start, End).
///
/// \param Start The index of the starting character in the substring; if
/// the index is npos or greater than the length of the string then the
/// empty substring will be returned.
///
/// \param End The index following the last character to include in the
/// substring. If this is npos or exceeds the number of characters
/// remaining in the string, the string suffix (starting with \p Start)
/// will be returned. If this is less than \p Start, an empty string will
/// be returned.
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringRef slice(size_t Start, size_t End) const {
Start = std::min(Start, Length);
End = std::min(std::max(Start, End), Length);
return StringRef(Data + Start, End - Start);
}
/// Split into two substrings around the first occurrence of a separator
/// character.
///
/// If \p Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
/// maximal. If \p Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator The character to split on.
/// \returns The split substrings.
std::pair<StringRef, StringRef> split(char Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
/// Split into two substrings around the first occurrence of a separator
/// string.
///
/// If \p Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
/// maximal. If \p Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The string to split on.
/// \return - The split substrings.
std::pair<StringRef, StringRef> split(StringRef Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
}
/// Split into substrings around the occurrences of a separator string.
///
/// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
/// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
/// elements are added to A.
/// If \p KeepEmpty is false, empty strings are not added to \p A. They
/// still count when considering \p MaxSplit.
/// A useful invariant is that
/// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
///
/// \param A - Where to put the substrings.
/// \param Separator - The string to split on.
/// \param MaxSplit - The maximum number of times the string is split.
/// \param KeepEmpty - True if empty substring should be added.
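///
/// For example, splitting "a,b,c" on "," with the defaults stores
/// {"a", "b", "c"} in \p A.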
void split(SmallVectorImpl<StringRef> &A,
StringRef Separator, int MaxSplit = -1,
bool KeepEmpty = true) const;
/// Split into substrings around the occurrences of a separator character.
///
/// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
/// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
/// elements are added to A.
/// If \p KeepEmpty is false, empty strings are not added to \p A. They
/// still count when considering \p MaxSplit.
/// A useful invariant is that
/// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
///
/// \param A - Where to put the substrings.
/// \param Separator - The string to split on.
/// \param MaxSplit - The maximum number of times the string is split.
/// \param KeepEmpty - True if empty substring should be added.
void split(SmallVectorImpl<StringRef> &A, char Separator, int MaxSplit = -1,
bool KeepEmpty = true) const;
/// Split into two substrings around the last occurrence of a separator
/// character.
///
/// If \p Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
/// minimal. If \p Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The character to split on.
/// \return - The split substrings.
std::pair<StringRef, StringRef> rsplit(char Separator) const {
size_t Idx = rfind(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
/// Return string with consecutive \p Char characters starting from the
/// left removed.
StringRef ltrim(char Char) const {
return drop_front(std::min(Length, find_first_not_of(Char)));
}
/// Return string with consecutive characters in \p Chars starting from
/// the left removed.
StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_front(std::min(Length, find_first_not_of(Chars)));
}
/// Return string with consecutive \p Char characters starting from the
/// right removed.
StringRef rtrim(char Char) const {
return drop_back(Length - std::min(Length, find_last_not_of(Char) + 1));
}
/// Return string with consecutive characters in \p Chars starting from
/// the right removed.
StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
}
/// Return string with consecutive \p Char characters starting from the
/// left and right removed.
StringRef trim(char Char) const {
return ltrim(Char).rtrim(Char);
}
/// Return string with consecutive characters in \p Chars starting from
/// the left and right removed.
StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
return ltrim(Chars).rtrim(Chars);
}
/// @}
};
/// @name StringRef Comparison Operators
/// @{
LLVM_ATTRIBUTE_ALWAYS_INLINE
inline bool operator==(StringRef LHS, StringRef RHS) {
return LHS.equals(RHS);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
inline bool operator!=(StringRef LHS, StringRef RHS) {
return !(LHS == RHS);
}
inline bool operator<(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) == -1;
}
inline bool operator<=(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) != 1;
}
inline bool operator>(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) == 1;
}
inline bool operator>=(StringRef LHS, StringRef RHS) {
return LHS.compare(RHS) != -1;
}
inline std::string &operator+=(std::string &buffer, StringRef string) {
return buffer.append(string.data(), string.size());
}
/// @}
/// \brief Compute a hash_code for a StringRef.
hash_code hash_value(StringRef S);
// StringRefs can be treated like a POD type.
template <typename T> struct isPodLike;
template <> struct isPodLike<StringRef> { static const bool value = true; };
}
#endif

View File

@ -1,45 +0,0 @@
//===--- StringSet.h - The LLVM Compiler Driver -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open
// Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// StringSet - A set-like wrapper for the StringMap.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGSET_H
#define LLVM_ADT_STRINGSET_H
#include "llvm/ADT/StringMap.h"
namespace llvm {
/// StringSet - A wrapper for StringMap that provides set-like functionality.
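///
/// A minimal usage sketch (added for illustration, not part of the original
/// header):
///
/// \code
///   StringSet<> Keywords = {"if", "else", "while"};
///   bool IsKeyword = Keywords.count("else"); // true: "else" is in the set
/// \endcode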
template <class AllocatorTy = llvm::MallocAllocator>
class StringSet : public llvm::StringMap<char, AllocatorTy> {
typedef llvm::StringMap<char, AllocatorTy> base;
public:
StringSet() = default;
StringSet(std::initializer_list<StringRef> S) {
for (StringRef X : S)
insert(X);
}
std::pair<typename base::iterator, bool> insert(StringRef Key) {
assert(!Key.empty());
return base::insert(std::make_pair(Key, '\0'));
}
template <typename InputIt>
void insert(const InputIt &Begin, const InputIt &End) {
for (auto It = Begin; It != End; ++It)
base::insert(std::make_pair(*It, '\0'));
}
};
}
#endif // LLVM_ADT_STRINGSET_H


@@ -1,166 +0,0 @@
//===--- StringSwitch.h - Switch-on-literal-string Construct --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements the StringSwitch template, which mimics a switch()
// statement whose cases are string literals.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRINGSWITCH_H
#define LLVM_ADT_STRINGSWITCH_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstring>
namespace llvm {
/// \brief A switch()-like statement whose cases are string literals.
///
/// The StringSwitch class is a simple form of a switch() statement that
/// determines whether the given string matches one of the given string
/// literals. The template type parameter \p T is the type of the value that
/// will be returned from the string-switch expression. For example,
/// the following code switches on the name of a color in \c argv[i]:
///
/// \code
/// Color color = StringSwitch<Color>(argv[i])
/// .Case("red", Red)
/// .Case("orange", Orange)
/// .Case("yellow", Yellow)
/// .Case("green", Green)
/// .Case("blue", Blue)
/// .Case("indigo", Indigo)
/// .Cases("violet", "purple", Violet)
/// .Default(UnknownColor);
/// \endcode
template<typename T, typename R = T>
class StringSwitch {
/// \brief The string we are matching.
StringRef Str;
/// \brief The pointer to the result of this switch statement, once known,
/// null before that.
const T *Result;
public:
LLVM_ATTRIBUTE_ALWAYS_INLINE
explicit StringSwitch(StringRef S)
: Str(S), Result(nullptr) { }
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& Case(const char (&S)[N], const T& Value) {
if (!Result && N-1 == Str.size() &&
(std::memcmp(S, Str.data(), N-1) == 0)) {
Result = &Value;
}
return *this;
}
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& EndsWith(const char (&S)[N], const T &Value) {
if (!Result && Str.size() >= N-1 &&
std::memcmp(S, Str.data() + Str.size() + 1 - N, N-1) == 0) {
Result = &Value;
}
return *this;
}
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& StartsWith(const char (&S)[N], const T &Value) {
if (!Result && Str.size() >= N-1 &&
std::memcmp(S, Str.data(), N-1) == 0) {
Result = &Value;
}
return *this;
}
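  // Illustrative sketch (not part of the original header): prefix and suffix
  // cases compose with exact cases, and the first case to match wins. "Flag"
  // is an assumed StringRef variable.
  //   int Kind = StringSwitch<int>(Flag)
  //                  .StartsWith("-O", 1)
  //                  .EndsWith(".o", 2)
  //                  .Default(0);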
template<unsigned N0, unsigned N1>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
const T& Value) {
if (!Result && (
(N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
(N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0))) {
Result = &Value;
}
return *this;
}
template<unsigned N0, unsigned N1, unsigned N2>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const T& Value) {
if (!Result && (
(N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
(N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
(N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0))) {
Result = &Value;
}
return *this;
}
template<unsigned N0, unsigned N1, unsigned N2, unsigned N3>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const T& Value) {
if (!Result && (
(N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
(N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
(N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0) ||
(N3-1 == Str.size() && std::memcmp(S3, Str.data(), N3-1) == 0))) {
Result = &Value;
}
return *this;
}
template<unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const T& Value) {
if (!Result && (
(N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
(N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
(N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0) ||
(N3-1 == Str.size() && std::memcmp(S3, Str.data(), N3-1) == 0) ||
(N4-1 == Str.size() && std::memcmp(S4, Str.data(), N4-1) == 0))) {
Result = &Value;
}
return *this;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
R Default(const T& Value) const {
if (Result)
return *Result;
return Value;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
operator R() const {
assert(Result && "Fell off the end of a string-switch");
return *Result;
}
};
} // end namespace llvm
#endif // LLVM_ADT_STRINGSWITCH_H


@@ -1,334 +0,0 @@
//===- llvm/ADT/TinyPtrVector.h - 'Normally tiny' vectors -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_TINYPTRVECTOR_H
#define LLVM_ADT_TINYPTRVECTOR_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
/// TinyPtrVector - This class is specialized for cases where there are
/// normally 0 or 1 element in a vector, but is general enough to go beyond that
/// when required.
///
/// NOTE: This container doesn't allow you to store a null pointer into it.
///
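/// A minimal usage sketch (added for illustration, not part of the original
/// header):
///
/// \code
///   TinyPtrVector<int *> V; // empty: no heap allocation
///   int X = 0, Y = 0;
///   V.push_back(&X);        // stored inline in the pointer-sized slot
///   V.push_back(&Y);        // spills to a heap-allocated SmallVector
///   for (int *P : V)        // iteration works in either representation
///     ++*P;
/// \endcode
///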
template <typename EltTy>
class TinyPtrVector {
public:
typedef llvm::SmallVector<EltTy, 4> VecTy;
typedef typename VecTy::value_type value_type;
typedef llvm::PointerUnion<EltTy, VecTy *> PtrUnion;
private:
PtrUnion Val;
public:
TinyPtrVector() {}
~TinyPtrVector() {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
delete V;
}
TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
Val = new VecTy(*V);
}
TinyPtrVector &operator=(const TinyPtrVector &RHS) {
if (this == &RHS)
return *this;
if (RHS.empty()) {
this->clear();
return *this;
}
// Try to squeeze into the single slot. If it won't fit, allocate a copied
// vector.
if (Val.template is<EltTy>()) {
if (RHS.size() == 1)
Val = RHS.front();
else
Val = new VecTy(*RHS.Val.template get<VecTy*>());
return *this;
}
// If we have a full vector allocated, try to re-use it.
if (RHS.Val.template is<EltTy>()) {
Val.template get<VecTy*>()->clear();
Val.template get<VecTy*>()->push_back(RHS.front());
} else {
*Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
}
return *this;
}
TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
RHS.Val = (EltTy)nullptr;
}
TinyPtrVector &operator=(TinyPtrVector &&RHS) {
if (this == &RHS)
return *this;
if (RHS.empty()) {
this->clear();
return *this;
}
// If this vector has been allocated on the heap, re-use it if cheap. If it
// would require more copying, just delete it and we'll steal the other
// side.
if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
if (RHS.Val.template is<EltTy>()) {
V->clear();
V->push_back(RHS.front());
return *this;
}
delete V;
}
Val = RHS.Val;
RHS.Val = (EltTy)nullptr;
return *this;
}
/// Constructor from an ArrayRef.
///
/// This also is a constructor for individual array elements due to the single
/// element constructor for ArrayRef.
explicit TinyPtrVector(ArrayRef<EltTy> Elts)
: Val(Elts.empty()
? PtrUnion()
: Elts.size() == 1
? PtrUnion(Elts[0])
: PtrUnion(new VecTy(Elts.begin(), Elts.end()))) {}
TinyPtrVector(size_t Count, EltTy Value)
: Val(Count == 0 ? PtrUnion()
: Count == 1 ? PtrUnion(Value)
: PtrUnion(new VecTy(Count, Value))) {}
// implicit conversion operator to ArrayRef.
operator ArrayRef<EltTy>() const {
if (Val.isNull())
return None;
if (Val.template is<EltTy>())
return *Val.getAddrOfPtr1();
return *Val.template get<VecTy*>();
}
// implicit conversion operator to MutableArrayRef.
operator MutableArrayRef<EltTy>() {
if (Val.isNull())
return None;
if (Val.template is<EltTy>())
return *Val.getAddrOfPtr1();
return *Val.template get<VecTy*>();
}
// Implicit conversion to ArrayRef<U> if EltTy* implicitly converts to U*.
template<typename U,
typename std::enable_if<
std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value,
bool>::type = false>
operator ArrayRef<U>() const {
return operator ArrayRef<EltTy>();
}
bool empty() const {
// This vector can be empty if it contains no element, or if it
// contains a pointer to an empty vector.
if (Val.isNull()) return true;
if (VecTy *Vec = Val.template dyn_cast<VecTy*>())
return Vec->empty();
return false;
}
unsigned size() const {
if (empty())
return 0;
if (Val.template is<EltTy>())
return 1;
return Val.template get<VecTy*>()->size();
}
typedef EltTy *iterator;
typedef const EltTy *const_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
iterator begin() {
if (Val.template is<EltTy>())
return Val.getAddrOfPtr1();
return Val.template get<VecTy *>()->begin();
}
iterator end() {
if (Val.template is<EltTy>())
return begin() + (Val.isNull() ? 0 : 1);
return Val.template get<VecTy *>()->end();
}
const_iterator begin() const {
return (const_iterator)const_cast<TinyPtrVector*>(this)->begin();
}
const_iterator end() const {
return (const_iterator)const_cast<TinyPtrVector*>(this)->end();
}
reverse_iterator rbegin() { return reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
EltTy operator[](unsigned i) const {
assert(!Val.isNull() && "can't index into an empty vector");
if (EltTy V = Val.template dyn_cast<EltTy>()) {
assert(i == 0 && "tinyvector index out of range");
return V;
}
assert(i < Val.template get<VecTy*>()->size() &&
"tinyvector index out of range");
return (*Val.template get<VecTy*>())[i];
}
EltTy front() const {
assert(!empty() && "vector empty");
if (EltTy V = Val.template dyn_cast<EltTy>())
return V;
return Val.template get<VecTy*>()->front();
}
EltTy back() const {
assert(!empty() && "vector empty");
if (EltTy V = Val.template dyn_cast<EltTy>())
return V;
return Val.template get<VecTy*>()->back();
}
void push_back(EltTy NewVal) {
assert(NewVal && "Can't add a null value");
// If we have nothing, add something.
if (Val.isNull()) {
Val = NewVal;
return;
}
// If we have a single value, convert to a vector.
if (EltTy V = Val.template dyn_cast<EltTy>()) {
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
// Add the new value; we know we have a vector.
Val.template get<VecTy*>()->push_back(NewVal);
}
void pop_back() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>())
Val = (EltTy)nullptr;
else if (VecTy *Vec = Val.template get<VecTy*>())
Vec->pop_back();
}
void clear() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
Val = (EltTy)nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// If we have a vector form, just clear it.
Vec->clear();
}
// Otherwise, we're already empty.
}
iterator erase(iterator I) {
assert(I >= begin() && "Iterator to erase is out of bounds.");
assert(I < end() && "Erasing at past-the-end iterator.");
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
if (I == begin())
Val = (EltTy)nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// multiple items in a vector; just do the erase, as there is no
// benefit to collapsing back to a pointer
return Vec->erase(I);
}
return end();
}
iterator erase(iterator S, iterator E) {
assert(S >= begin() && "Range to erase is out of bounds.");
assert(S <= E && "Trying to erase invalid range.");
assert(E <= end() && "Trying to erase past the end.");
if (Val.template is<EltTy>()) {
if (S == begin() && S != E)
Val = (EltTy)nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
return Vec->erase(S, E);
}
return end();
}
iterator insert(iterator I, const EltTy &Elt) {
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (I == end()) {
push_back(Elt);
return std::prev(end());
}
assert(!Val.isNull() && "Null value with non-end insert iterator.");
if (EltTy V = Val.template dyn_cast<EltTy>()) {
assert(I == begin());
Val = Elt;
push_back(V);
return begin();
}
return Val.template get<VecTy*>()->insert(I, Elt);
}
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (From == To)
return I;
// If we have a single value, convert to a vector.
ptrdiff_t Offset = I - begin();
if (Val.isNull()) {
if (std::next(From) == To) {
Val = *From;
return begin();
}
Val = new VecTy();
} else if (EltTy V = Val.template dyn_cast<EltTy>()) {
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
}
};
} // end namespace llvm
#endif


@@ -1,727 +0,0 @@
//===-- llvm/ADT/Triple.h - Target triple helper class ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_TRIPLE_H
#define LLVM_ADT_TRIPLE_H
#include "llvm/ADT/Twine.h"
// Some system headers or GCC predefined macros conflict with identifiers in
// this file. Undefine them here.
#undef NetBSD
#undef mips
#undef sparc
namespace llvm {
/// Triple - Helper class for working with autoconf configuration names. For
/// historical reasons, we also call these 'triples' (they used to contain
/// exactly three fields).
///
/// Configuration names are strings in the canonical form:
/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM
/// or
/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
///
/// This class is used for clients which want to support arbitrary
/// configuration names, but also want to implement certain special
/// behavior for particular configurations. This class isolates the mapping
/// from the components of the configuration name to well known IDs.
///
/// At its core the Triple class is designed to be a wrapper for a triple
/// string; the constructor does not change or normalize the triple string.
/// Clients that need to handle the non-canonical triples that users often
/// specify should use the normalize method.
///
/// See autoconf/config.guess for a glimpse into what configuration names
/// look like in practice.
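///
/// A minimal usage sketch (added for illustration, not part of the original
/// header):
///
/// \code
///   Triple T("x86_64-apple-darwin15.6.0");
///   bool Is64BitMac = T.isArch64Bit() && T.isMacOSX(); // true for this triple
/// \endcode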
class Triple {
public:
enum ArchType {
UnknownArch,
arm, // ARM (little endian): arm, armv.*, xscale
armeb, // ARM (big endian): armeb
aarch64, // AArch64 (little endian): aarch64
aarch64_be, // AArch64 (big endian): aarch64_be
avr, // AVR: Atmel AVR microcontroller
bpfel, // eBPF or extended BPF or 64-bit BPF (little endian)
bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian)
hexagon, // Hexagon: hexagon
mips, // MIPS: mips, mipsallegrex
mipsel, // MIPSEL: mipsel, mipsallegrexel
mips64, // MIPS64: mips64
mips64el, // MIPS64EL: mips64el
msp430, // MSP430: msp430
ppc, // PPC: powerpc
ppc64, // PPC64: powerpc64, ppu
ppc64le, // PPC64LE: powerpc64le
r600, // R600: AMD GPUs HD2XXX - HD6XXX
amdgcn, // AMDGCN: AMD GCN GPUs
sparc, // Sparc: sparc
sparcv9, // Sparcv9: Sparcv9
sparcel, // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant
systemz, // SystemZ: s390x
tce, // TCE (http://tce.cs.tut.fi/): tce
thumb, // Thumb (little endian): thumb, thumbv.*
thumbeb, // Thumb (big endian): thumbeb
x86, // X86: i[3-9]86
x86_64, // X86-64: amd64, x86_64
xcore, // XCore: xcore
nvptx, // NVPTX: 32-bit
nvptx64, // NVPTX: 64-bit
le32, // le32: generic little-endian 32-bit CPU (PNaCl)
le64, // le64: generic little-endian 64-bit CPU (PNaCl)
amdil, // AMDIL
amdil64, // AMDIL with 64-bit pointers
hsail, // AMD HSAIL
hsail64, // AMD HSAIL with 64-bit pointers
spir, // SPIR: standard portable IR for OpenCL 32-bit version
spir64, // SPIR: standard portable IR for OpenCL 64-bit version
kalimba, // Kalimba: generic kalimba
shave, // SHAVE: Movidius vector VLIW processors
lanai, // Lanai: Lanai 32-bit
wasm32, // WebAssembly with 32-bit pointers
wasm64, // WebAssembly with 64-bit pointers
renderscript32, // 32-bit RenderScript
renderscript64, // 64-bit RenderScript
LastArchType = renderscript64
};
enum SubArchType {
NoSubArch,
ARMSubArch_v8_2a,
ARMSubArch_v8_1a,
ARMSubArch_v8,
ARMSubArch_v8m_baseline,
ARMSubArch_v8m_mainline,
ARMSubArch_v7,
ARMSubArch_v7em,
ARMSubArch_v7m,
ARMSubArch_v7s,
ARMSubArch_v7k,
ARMSubArch_v6,
ARMSubArch_v6m,
ARMSubArch_v6k,
ARMSubArch_v6t2,
ARMSubArch_v5,
ARMSubArch_v5te,
ARMSubArch_v4t,
KalimbaSubArch_v3,
KalimbaSubArch_v4,
KalimbaSubArch_v5
};
enum VendorType {
UnknownVendor,
Apple,
PC,
SCEI,
BGP,
BGQ,
Freescale,
IBM,
ImaginationTechnologies,
MipsTechnologies,
NVIDIA,
CSR,
Myriad,
AMD,
Mesa,
LastVendorType = Mesa
};
enum OSType {
UnknownOS,
CloudABI,
Darwin,
DragonFly,
FreeBSD,
IOS,
KFreeBSD,
Linux,
Lv2, // PS3
MacOSX,
NetBSD,
OpenBSD,
Solaris,
Win32,
Haiku,
Minix,
RTEMS,
NaCl, // Native Client
CNK, // BG/P Compute-Node Kernel
Bitrig,
AIX,
CUDA, // NVIDIA CUDA
NVCL, // NVIDIA OpenCL
AMDHSA, // AMD HSA Runtime
PS4,
ELFIAMCU,
TvOS, // Apple tvOS
WatchOS, // Apple watchOS
Mesa3D,
LastOSType = Mesa3D
};
enum EnvironmentType {
UnknownEnvironment,
GNU,
GNUABI64,
GNUEABI,
GNUEABIHF,
GNUX32,
CODE16,
EABI,
EABIHF,
Android,
Musl,
MuslEABI,
MuslEABIHF,
MSVC,
Itanium,
Cygnus,
AMDOpenCL,
CoreCLR,
LastEnvironmentType = CoreCLR
};
enum ObjectFormatType {
UnknownObjectFormat,
COFF,
ELF,
MachO,
};
private:
std::string Data;
/// The parsed arch type.
ArchType Arch;
/// The parsed subarchitecture type.
SubArchType SubArch;
/// The parsed vendor type.
VendorType Vendor;
/// The parsed OS type.
OSType OS;
/// The parsed Environment type.
EnvironmentType Environment;
/// The object format type.
ObjectFormatType ObjectFormat;
public:
/// @name Constructors
/// @{
/// Default constructor is the same as an empty string and leaves all
/// triple fields unknown.
Triple() : Data(), Arch(), Vendor(), OS(), Environment(), ObjectFormat() {}
explicit Triple(const Twine &Str);
Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
const Twine &EnvironmentStr);
bool operator==(const Triple &Other) const {
return Arch == Other.Arch && SubArch == Other.SubArch &&
Vendor == Other.Vendor && OS == Other.OS &&
Environment == Other.Environment &&
ObjectFormat == Other.ObjectFormat;
}
/// @}
/// @name Normalization
/// @{
/// normalize - Turn an arbitrary machine specification into the canonical
/// triple form (or something sensible that the Triple class understands if
/// nothing better can reasonably be done). In particular, it handles the
/// common case in which otherwise valid components are in the wrong order.
static std::string normalize(StringRef Str);
/// Return the normalized form of this triple's string.
std::string normalize() const { return normalize(Data); }
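// Illustrative sketch (not part of the original header): normalize() supplies
// missing components and reorders misplaced ones.
//   Triple::normalize("x86_64-linux-gnu"); // "x86_64-unknown-linux-gnu"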
/// @}
/// @name Typed Component Access
/// @{
/// getArch - Get the parsed architecture type of this triple.
ArchType getArch() const { return Arch; }
/// getSubArch - get the parsed subarchitecture type for this triple.
SubArchType getSubArch() const { return SubArch; }
/// getVendor - Get the parsed vendor type of this triple.
VendorType getVendor() const { return Vendor; }
/// getOS - Get the parsed operating system type of this triple.
OSType getOS() const { return OS; }
/// hasEnvironment - Does this triple have the optional environment
/// (fourth) component?
bool hasEnvironment() const {
return getEnvironmentName() != "";
}
/// getEnvironment - Get the parsed environment type of this triple.
EnvironmentType getEnvironment() const { return Environment; }
/// Parse the version number from the environment component of the
/// triple, if present.
///
/// For example, "fooos1.2.3" would return (1, 2, 3).
///
/// If an entry is not defined, it will be returned as 0.
void getEnvironmentVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
/// getFormat - Get the object format for this triple.
ObjectFormatType getObjectFormat() const { return ObjectFormat; }
/// getOSVersion - Parse the version number from the OS name component of the
/// triple, if present.
///
/// For example, "fooos1.2.3" would return (1, 2, 3).
///
/// If an entry is not defined, it will be returned as 0.
void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const;
/// getOSMajorVersion - Return just the major version number; this is
/// specialized because it is a common query.
unsigned getOSMajorVersion() const {
unsigned Maj, Min, Micro;
getOSVersion(Maj, Min, Micro);
return Maj;
}
/// getMacOSXVersion - Parse the version number as with getOSVersion and then
/// translate generic "darwin" versions to the corresponding OS X versions.
/// This may also be called with IOS triples but the OS X version number is
/// just set to a constant 10.4.0 in that case. Returns true if successful.
bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
/// getiOSVersion - Parse the version number as with getOSVersion. This should
/// only be called with IOS or generic triples.
void getiOSVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
/// getWatchOSVersion - Parse the version number as with getOSVersion. This
/// should only be called with WatchOS or generic triples.
void getWatchOSVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
/// @}
/// @name Direct Component Access
/// @{
const std::string &str() const { return Data; }
const std::string &getTriple() const { return Data; }
/// getArchName - Get the architecture (first) component of the
/// triple.
StringRef getArchName() const;
/// getVendorName - Get the vendor (second) component of the triple.
StringRef getVendorName() const;
/// getOSName - Get the operating system (third) component of the
/// triple.
StringRef getOSName() const;
/// getEnvironmentName - Get the optional environment (fourth)
/// component of the triple, or "" if empty.
StringRef getEnvironmentName() const;
/// getOSAndEnvironmentName - Get the operating system and optional
/// environment components as a single string (separated by a '-'
/// if the environment component is present).
StringRef getOSAndEnvironmentName() const;
/// @}
/// @name Convenience Predicates
/// @{
/// Test whether the architecture is 64-bit
///
/// Note that this tests for 64-bit pointer width, and nothing else. Note
/// that we intentionally expose only three predicates, 64-bit, 32-bit, and
/// 16-bit. The inner details of pointer width for particular architectures
/// are not summed up in the triple, and so only a coarse-grained predicate
/// system is provided.
bool isArch64Bit() const;
/// Test whether the architecture is 32-bit
///
/// Note that this tests for 32-bit pointer width, and nothing else.
bool isArch32Bit() const;
/// Test whether the architecture is 16-bit
///
/// Note that this tests for 16-bit pointer width, and nothing else.
bool isArch16Bit() const;
/// isOSVersionLT - Helper function for doing comparisons against version
/// numbers included in the target triple.
bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
unsigned Micro = 0) const {
unsigned LHS[3];
getOSVersion(LHS[0], LHS[1], LHS[2]);
if (LHS[0] != Major)
return LHS[0] < Major;
if (LHS[1] != Minor)
return LHS[1] < Minor;
if (LHS[2] != Micro)
return LHS[2] < Micro;
return false;
}
bool isOSVersionLT(const Triple &Other) const {
unsigned RHS[3];
Other.getOSVersion(RHS[0], RHS[1], RHS[2]);
return isOSVersionLT(RHS[0], RHS[1], RHS[2]);
}
/// isMacOSXVersionLT - Comparison function for checking OS X version
/// compatibility, which handles supporting skewed version numbering schemes
/// used by the "darwin" triples.
bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
unsigned Micro = 0) const {
assert(isMacOSX() && "Not an OS X triple!");
// If this is OS X, expect a sane version number.
if (getOS() == Triple::MacOSX)
return isOSVersionLT(Major, Minor, Micro);
// Otherwise, compare to the "Darwin" number.
assert(Major == 10 && "Unexpected major version");
return isOSVersionLT(Minor + 4, Micro, 0);
}
/// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both
/// "darwin" and "osx" as OS X triples.
bool isMacOSX() const {
return getOS() == Triple::Darwin || getOS() == Triple::MacOSX;
}
/// Is this an iOS triple.
/// Note: This identifies tvOS as a variant of iOS. If that ever
/// changes, i.e., if the two operating systems diverge or their version
/// numbers get out of sync, that will need to be changed.
/// watchOS has completely different version numbers so it is not included.
bool isiOS() const {
return getOS() == Triple::IOS || isTvOS();
}
/// Is this an Apple tvOS triple.
bool isTvOS() const {
return getOS() == Triple::TvOS;
}
/// Is this an Apple watchOS triple.
bool isWatchOS() const {
return getOS() == Triple::WatchOS;
}
bool isWatchABI() const {
return getSubArch() == Triple::ARMSubArch_v7k;
}
/// isOSDarwin - Is this a "Darwin" OS (OS X, iOS, or watchOS).
bool isOSDarwin() const {
return isMacOSX() || isiOS() || isWatchOS();
}
bool isOSNetBSD() const {
return getOS() == Triple::NetBSD;
}
bool isOSOpenBSD() const {
return getOS() == Triple::OpenBSD;
}
bool isOSFreeBSD() const {
return getOS() == Triple::FreeBSD;
}
bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }
bool isOSSolaris() const {
return getOS() == Triple::Solaris;
}
bool isOSBitrig() const {
return getOS() == Triple::Bitrig;
}
bool isOSIAMCU() const {
return getOS() == Triple::ELFIAMCU;
}
bool isGNUEnvironment() const {
EnvironmentType Env = getEnvironment();
return Env == Triple::GNU || Env == Triple::GNUABI64 ||
Env == Triple::GNUEABI || Env == Triple::GNUEABIHF ||
Env == Triple::GNUX32;
}
/// Checks if the environment could be MSVC.
bool isWindowsMSVCEnvironment() const {
return getOS() == Triple::Win32 &&
(getEnvironment() == Triple::UnknownEnvironment ||
getEnvironment() == Triple::MSVC);
}
/// Checks if the environment is MSVC.
bool isKnownWindowsMSVCEnvironment() const {
return getOS() == Triple::Win32 && getEnvironment() == Triple::MSVC;
}
bool isWindowsCoreCLREnvironment() const {
return getOS() == Triple::Win32 && getEnvironment() == Triple::CoreCLR;
}
bool isWindowsItaniumEnvironment() const {
return getOS() == Triple::Win32 && getEnvironment() == Triple::Itanium;
}
bool isWindowsCygwinEnvironment() const {
return getOS() == Triple::Win32 && getEnvironment() == Triple::Cygnus;
}
bool isWindowsGNUEnvironment() const {
return getOS() == Triple::Win32 && getEnvironment() == Triple::GNU;
}
/// Tests for either Cygwin or MinGW OS
bool isOSCygMing() const {
return isWindowsCygwinEnvironment() || isWindowsGNUEnvironment();
}
/// Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
bool isOSMSVCRT() const {
return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment() ||
isWindowsItaniumEnvironment();
}
/// Tests whether the OS is Windows.
bool isOSWindows() const {
return getOS() == Triple::Win32;
}
/// Tests whether the OS is NaCl (Native Client)
bool isOSNaCl() const {
return getOS() == Triple::NaCl;
}
/// Tests whether the OS is Linux.
bool isOSLinux() const {
return getOS() == Triple::Linux;
}
/// Tests whether the OS is kFreeBSD.
bool isOSKFreeBSD() const {
return getOS() == Triple::KFreeBSD;
}
/// Tests whether the OS uses glibc.
bool isOSGlibc() const {
return getOS() == Triple::Linux || getOS() == Triple::KFreeBSD;
}
/// Tests whether the OS uses the ELF binary format.
bool isOSBinFormatELF() const {
return getObjectFormat() == Triple::ELF;
}
/// Tests whether the OS uses the COFF binary format.
bool isOSBinFormatCOFF() const {
return getObjectFormat() == Triple::COFF;
}
/// Tests whether the environment is MachO.
bool isOSBinFormatMachO() const {
return getObjectFormat() == Triple::MachO;
}
/// Tests whether the target is the PS4 CPU
bool isPS4CPU() const {
return getArch() == Triple::x86_64 &&
getVendor() == Triple::SCEI &&
getOS() == Triple::PS4;
}
/// Tests whether the target is the PS4 platform
bool isPS4() const {
return getVendor() == Triple::SCEI &&
getOS() == Triple::PS4;
}
/// Tests whether the target is Android
bool isAndroid() const { return getEnvironment() == Triple::Android; }
/// Tests whether the environment is musl-libc
bool isMusl() const {
return getEnvironment() == Triple::Musl ||
getEnvironment() == Triple::MuslEABI ||
getEnvironment() == Triple::MuslEABIHF;
}
/// Tests whether the target is NVPTX (32- or 64-bit).
bool isNVPTX() const {
return getArch() == Triple::nvptx || getArch() == Triple::nvptx64;
}
/// Tests whether the target supports comdat
bool supportsCOMDAT() const { return !isOSBinFormatMachO(); }
/// @}
/// @name Mutators
/// @{
/// setArch - Set the architecture (first) component of the triple
/// to a known type.
void setArch(ArchType Kind);
/// setVendor - Set the vendor (second) component of the triple to a
/// known type.
void setVendor(VendorType Kind);
/// setOS - Set the operating system (third) component of the triple
/// to a known type.
void setOS(OSType Kind);
/// setEnvironment - Set the environment (fourth) component of the triple
/// to a known type.
void setEnvironment(EnvironmentType Kind);
/// setObjectFormat - Set the object file format
void setObjectFormat(ObjectFormatType Kind);
/// setTriple - Set all components to the new triple \p Str.
void setTriple(const Twine &Str);
/// setArchName - Set the architecture (first) component of the
/// triple by name.
void setArchName(StringRef Str);
/// setVendorName - Set the vendor (second) component of the triple
/// by name.
void setVendorName(StringRef Str);
/// setOSName - Set the operating system (third) component of the
/// triple by name.
void setOSName(StringRef Str);
/// setEnvironmentName - Set the optional environment (fourth)
/// component of the triple by name.
void setEnvironmentName(StringRef Str);
/// setOSAndEnvironmentName - Set the operating system and optional
/// environment components with a single string.
void setOSAndEnvironmentName(StringRef Str);
/// @}
/// @name Helpers to build variants of a particular triple.
/// @{
/// Form a triple with a 32-bit variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a 32-bit architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple get32BitArchVariant() const;
/// Form a triple with a 64-bit variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a 64-bit architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple get64BitArchVariant() const;
/// Form a triple with a big endian variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a big endian architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple getBigEndianArchVariant() const;
/// Form a triple with a little endian variant of the current architecture.
///
/// This can be used to move across "families" of architectures where useful.
///
/// \returns A new triple with a little endian architecture or an unknown
/// architecture if no such variant can be found.
llvm::Triple getLittleEndianArchVariant() const;
/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
///
/// \param Arch the architecture name (e.g., "armv7s"). If it is an empty
/// string then the triple's arch name is used.
StringRef getARMCPUForArch(StringRef Arch = StringRef()) const;
/// Tests whether the target triple is little endian.
///
/// \returns true if the triple is little endian, false otherwise.
bool isLittleEndian() const;
/// @}
/// @name Static helpers for IDs.
/// @{
/// getArchTypeName - Get the canonical name for the \p Kind architecture.
static const char *getArchTypeName(ArchType Kind);
/// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind
/// architecture. This is the prefix used by the architecture specific
/// builtins, and is suitable for passing to \see
/// Intrinsic::getIntrinsicForGCCBuiltin().
///
/// \return - The architecture prefix, or 0 if none is defined.
static const char *getArchTypePrefix(ArchType Kind);
/// getVendorTypeName - Get the canonical name for the \p Kind vendor.
static const char *getVendorTypeName(VendorType Kind);
/// getOSTypeName - Get the canonical name for the \p Kind operating system.
static const char *getOSTypeName(OSType Kind);
/// getEnvironmentTypeName - Get the canonical name for the \p Kind
/// environment.
static const char *getEnvironmentTypeName(EnvironmentType Kind);
/// @}
/// @name Static helpers for converting alternate architecture names.
/// @{
/// getArchTypeForLLVMName - The canonical type for the given LLVM
/// architecture name (e.g., "x86").
static ArchType getArchTypeForLLVMName(StringRef Str);
/// @}
};
} // End llvm namespace
#endif


@@ -1,540 +0,0 @@
//===-- Twine.h - Fast Temporary String Concatenation -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_TWINE_H
#define LLVM_ADT_TWINE_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <string>
namespace llvm {
class raw_ostream;
/// Twine - A lightweight data structure for efficiently representing the
/// concatenation of temporary values as strings.
///
/// A Twine is a kind of rope; it represents a concatenated string using a
/// binary tree, where the string is the preorder of the nodes. Since the
/// Twine can be efficiently rendered into a buffer when its result is used,
/// it avoids the cost of generating temporary values for intermediate string
/// results -- particularly in cases when the Twine result is never
/// required. By explicitly tracking the type of leaf nodes, we can also avoid
/// the creation of temporary strings for conversion operations (such as
/// appending an integer to a string).
///
/// A Twine is not intended for use directly and should not be stored; its
/// implementation relies on the ability to store pointers to temporary stack
/// objects which may be deallocated at the end of a statement. Twines should
/// only be accepted as const references in arguments, when an API wishes
/// to accept possibly-concatenated strings.
///
/// Twines support a special 'null' value, which always concatenates to form
/// itself, and renders as an empty string. This can be returned from APIs to
/// effectively nullify any concatenations performed on the result.
///
/// \b Implementation
///
/// Given the nature of a Twine, it is not possible for the Twine's
/// concatenation method to construct interior nodes; the result must be
/// represented inside the returned value. For this reason a Twine object
/// actually holds two values, the left- and right-hand sides of a
/// concatenation. We also have nullary Twine objects, which are effectively
/// sentinel values that represent empty strings.
///
/// Thus, a Twine can effectively have zero, one, or two children. The \see
/// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
/// testing the number of children.
///
/// We maintain a number of invariants on Twine objects (FIXME: Why):
/// - Nullary twines are always represented with their Kind on the left-hand
/// side, and the Empty kind on the right-hand side.
/// - Unary twines are always represented with the value on the left-hand
/// side, and the Empty kind on the right-hand side.
/// - If a Twine has another Twine as a child, that child should always be
/// binary (otherwise it could have been folded into the parent).
///
/// These invariants are checked by \see isValid().
///
/// \b Efficiency Considerations
///
/// The Twine is designed to yield efficient and small code for common
/// situations. For this reason, the concat() method is inlined so that
/// concatenations of leaf nodes can be optimized into stores directly into a
/// single stack allocated object.
///
/// In practice, not all compilers can be trusted to optimize concat() fully,
/// so we provide two additional methods (and accompanying operator+
/// overloads) to guarantee that particularly important cases (cstring plus
/// StringRef) codegen as desired.
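///
/// A minimal usage sketch (added for illustration, not part of the original
/// header); emit() and Counter are assumed names:
///
/// \code
///   void emit(const Twine &Name);             // an API accepting a Twine
///   emit("tmp" + Twine(Counter));             // rendered only inside emit()
///   std::string S = (Twine("a") + "b").str(); // S == "ab"
/// \endcode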
class Twine {
/// NodeKind - Represent the type of an argument.
enum NodeKind : unsigned char {
/// An empty string; the result of concatenating anything with it is also
/// empty.
NullKind,
/// The empty string.
EmptyKind,
/// A pointer to a Twine instance.
TwineKind,
/// A pointer to a C string instance.
CStringKind,
/// A pointer to an std::string instance.
StdStringKind,
/// A pointer to a StringRef instance.
StringRefKind,
/// A pointer to a SmallString instance.
SmallStringKind,
/// A char value, to render as a character.
CharKind,
/// An unsigned int value, to render as an unsigned decimal integer.
DecUIKind,
/// An int value, to render as a signed decimal integer.
DecIKind,
/// A pointer to an unsigned long value, to render as an unsigned decimal
/// integer.
DecULKind,
/// A pointer to a long value, to render as a signed decimal integer.
DecLKind,
/// A pointer to an unsigned long long value, to render as an unsigned
/// decimal integer.
DecULLKind,
/// A pointer to a long long value, to render as a signed decimal integer.
DecLLKind,
/// A pointer to a uint64_t value, to render as an unsigned hexadecimal
/// integer.
UHexKind
};
union Child
{
const Twine *twine;
const char *cString;
const std::string *stdString;
const StringRef *stringRef;
const SmallVectorImpl<char> *smallString;
char character;
unsigned int decUI;
int decI;
const unsigned long *decUL;
const long *decL;
const unsigned long long *decULL;
const long long *decLL;
const uint64_t *uHex;
};
private:
/// LHS - The prefix in the concatenation, which may be uninitialized for
/// Null or Empty kinds.
Child LHS;
/// RHS - The suffix in the concatenation, which may be uninitialized for
/// Null or Empty kinds.
Child RHS;
/// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
NodeKind LHSKind;
/// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
NodeKind RHSKind;
private:
/// Construct a nullary twine; the kind must be NullKind or EmptyKind.
explicit Twine(NodeKind Kind)
: LHSKind(Kind), RHSKind(EmptyKind) {
assert(isNullary() && "Invalid kind!");
}
/// Construct a binary twine.
explicit Twine(const Twine &LHS, const Twine &RHS)
: LHSKind(TwineKind), RHSKind(TwineKind) {
this->LHS.twine = &LHS;
this->RHS.twine = &RHS;
assert(isValid() && "Invalid twine!");
}
/// Construct a twine from explicit values.
explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
: LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
assert(isValid() && "Invalid twine!");
}
/// Since the intended use of twines is as temporary objects, assignments
/// when concatenating might cause undefined behavior or stack corruption.
Twine &operator=(const Twine &Other) = delete;
/// Check for the null twine.
bool isNull() const {
return getLHSKind() == NullKind;
}
/// Check for the empty twine.
bool isEmpty() const {
return getLHSKind() == EmptyKind;
}
/// Check if this is a nullary twine (null or empty).
bool isNullary() const {
return isNull() || isEmpty();
}
/// Check if this is a unary twine.
bool isUnary() const {
return getRHSKind() == EmptyKind && !isNullary();
}
/// Check if this is a binary twine.
bool isBinary() const {
return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
}
/// Check if this is a valid twine (satisfying the invariants on
/// order and number of arguments).
bool isValid() const {
// Nullary twines always have Empty on the RHS.
if (isNullary() && getRHSKind() != EmptyKind)
return false;
// Null should never appear on the RHS.
if (getRHSKind() == NullKind)
return false;
// The RHS cannot be non-empty if the LHS is empty.
if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
return false;
// A twine child should always be binary.
if (getLHSKind() == TwineKind &&
!LHS.twine->isBinary())
return false;
if (getRHSKind() == TwineKind &&
!RHS.twine->isBinary())
return false;
return true;
}
/// Get the NodeKind of the left-hand side.
NodeKind getLHSKind() const { return LHSKind; }
/// Get the NodeKind of the right-hand side.
NodeKind getRHSKind() const { return RHSKind; }
/// Print one child from a twine.
void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
/// Print the representation of one child from a twine.
void printOneChildRepr(raw_ostream &OS, Child Ptr,
NodeKind Kind) const;
public:
/// @name Constructors
/// @{
/// Construct from an empty string.
/*implicit*/ Twine() : LHSKind(EmptyKind), RHSKind(EmptyKind) {
assert(isValid() && "Invalid twine!");
}
Twine(const Twine &) = default;
/// Construct from a C string.
///
/// We take care here to optimize "" into the empty twine -- this will be
/// optimized out for string constants. This allows Twine arguments to have
/// default "" values, without introducing unnecessary string constants.
/*implicit*/ Twine(const char *Str)
: RHSKind(EmptyKind) {
if (Str[0] != '\0') {
LHS.cString = Str;
LHSKind = CStringKind;
} else
LHSKind = EmptyKind;
assert(isValid() && "Invalid twine!");
}
/// Construct from an std::string.
/*implicit*/ Twine(const std::string &Str)
: LHSKind(StdStringKind), RHSKind(EmptyKind) {
LHS.stdString = &Str;
assert(isValid() && "Invalid twine!");
}
/// Construct from a StringRef.
/*implicit*/ Twine(const StringRef &Str)
: LHSKind(StringRefKind), RHSKind(EmptyKind) {
LHS.stringRef = &Str;
assert(isValid() && "Invalid twine!");
}
/// Construct from a SmallString.
/*implicit*/ Twine(const SmallVectorImpl<char> &Str)
: LHSKind(SmallStringKind), RHSKind(EmptyKind) {
LHS.smallString = &Str;
assert(isValid() && "Invalid twine!");
}
/// Construct from a char.
explicit Twine(char Val)
: LHSKind(CharKind), RHSKind(EmptyKind) {
LHS.character = Val;
}
/// Construct from a signed char.
explicit Twine(signed char Val)
: LHSKind(CharKind), RHSKind(EmptyKind) {
LHS.character = static_cast<char>(Val);
}
/// Construct from an unsigned char.
explicit Twine(unsigned char Val)
: LHSKind(CharKind), RHSKind(EmptyKind) {
LHS.character = static_cast<char>(Val);
}
/// Construct a twine to print \p Val as an unsigned decimal integer.
explicit Twine(unsigned Val)
: LHSKind(DecUIKind), RHSKind(EmptyKind) {
LHS.decUI = Val;
}
/// Construct a twine to print \p Val as a signed decimal integer.
explicit Twine(int Val)
: LHSKind(DecIKind), RHSKind(EmptyKind) {
LHS.decI = Val;
}
/// Construct a twine to print \p Val as an unsigned decimal integer.
explicit Twine(const unsigned long &Val)
: LHSKind(DecULKind), RHSKind(EmptyKind) {
LHS.decUL = &Val;
}
/// Construct a twine to print \p Val as a signed decimal integer.
explicit Twine(const long &Val)
: LHSKind(DecLKind), RHSKind(EmptyKind) {
LHS.decL = &Val;
}
/// Construct a twine to print \p Val as an unsigned decimal integer.
explicit Twine(const unsigned long long &Val)
: LHSKind(DecULLKind), RHSKind(EmptyKind) {
LHS.decULL = &Val;
}
/// Construct a twine to print \p Val as a signed decimal integer.
explicit Twine(const long long &Val)
: LHSKind(DecLLKind), RHSKind(EmptyKind) {
LHS.decLL = &Val;
}
// FIXME: Unfortunately, to make sure this is as efficient as possible we
// need extra binary constructors from particular types. We can't rely on
// the compiler to be smart enough to fold operator+()/concat() down to the
// right thing. Yet.
/// Construct as the concatenation of a C string and a StringRef.
/*implicit*/ Twine(const char *LHS, const StringRef &RHS)
: LHSKind(CStringKind), RHSKind(StringRefKind) {
this->LHS.cString = LHS;
this->RHS.stringRef = &RHS;
assert(isValid() && "Invalid twine!");
}
/// Construct as the concatenation of a StringRef and a C string.
/*implicit*/ Twine(const StringRef &LHS, const char *RHS)
: LHSKind(StringRefKind), RHSKind(CStringKind) {
this->LHS.stringRef = &LHS;
this->RHS.cString = RHS;
assert(isValid() && "Invalid twine!");
}
/// Create a 'null' string, which is an empty string that always
/// concatenates to form another empty string.
static Twine createNull() {
return Twine(NullKind);
}
/// @}
/// @name Numeric Conversions
/// @{
/// Construct a twine to print \p Val as an unsigned hexadecimal integer.
static Twine utohexstr(const uint64_t &Val) {
Child LHS, RHS;
LHS.uHex = &Val;
RHS.twine = nullptr;
return Twine(LHS, UHexKind, RHS, EmptyKind);
}
/// @}
/// @name Predicate Operations
/// @{
/// Check if this twine is trivially empty; a false return value does not
/// necessarily mean the twine is empty.
bool isTriviallyEmpty() const {
return isNullary();
}
/// Return true if this twine can be dynamically accessed as a single
/// StringRef value with getSingleStringRef().
bool isSingleStringRef() const {
if (getRHSKind() != EmptyKind) return false;
switch (getLHSKind()) {
case EmptyKind:
case CStringKind:
case StdStringKind:
case StringRefKind:
case SmallStringKind:
return true;
default:
return false;
}
}
/// @}
/// @name String Operations
/// @{
Twine concat(const Twine &Suffix) const;
/// @}
/// @name Output & Conversion.
/// @{
/// Return the twine contents as a std::string.
std::string str() const;
/// Append the concatenated string into the given SmallString or SmallVector.
void toVector(SmallVectorImpl<char> &Out) const;
/// This returns the twine as a single StringRef. This method is only valid
/// if isSingleStringRef() is true.
StringRef getSingleStringRef() const {
assert(isSingleStringRef() && "This cannot be had as a single stringref!");
switch (getLHSKind()) {
default: llvm_unreachable("Out of sync with isSingleStringRef");
case EmptyKind: return StringRef();
case CStringKind: return StringRef(LHS.cString);
case StdStringKind: return StringRef(*LHS.stdString);
case StringRefKind: return *LHS.stringRef;
case SmallStringKind:
return StringRef(LHS.smallString->data(), LHS.smallString->size());
}
}
/// This returns the twine as a single StringRef if it can be
/// represented as such. Otherwise the twine is written into the given
/// SmallVector and a StringRef to the SmallVector's data is returned.
StringRef toStringRef(SmallVectorImpl<char> &Out) const {
if (isSingleStringRef())
return getSingleStringRef();
toVector(Out);
return StringRef(Out.data(), Out.size());
}
/// This returns the twine as a single null terminated StringRef if it
/// can be represented as such. Otherwise the twine is written into the
/// given SmallVector and a StringRef to the SmallVector's data is returned.
///
/// The returned StringRef's size does not include the null terminator.
StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
/// Write the concatenated string represented by this twine to the
/// stream \p OS.
void print(raw_ostream &OS) const;
/// Dump the concatenated string represented by this twine to stderr.
void dump() const;
/// Write the representation of this twine to the stream \p OS.
void printRepr(raw_ostream &OS) const;
/// Dump the representation of this twine to stderr.
void dumpRepr() const;
/// @}
};
/// @name Twine Inline Implementations
/// @{
inline Twine Twine::concat(const Twine &Suffix) const {
// Concatenation with null is null.
if (isNull() || Suffix.isNull())
return Twine(NullKind);
// Concatenation with empty yields the other side.
if (isEmpty())
return Suffix;
if (Suffix.isEmpty())
return *this;
// Otherwise we need to create a new node, taking care to fold in unary
// twines.
Child NewLHS, NewRHS;
NewLHS.twine = this;
NewRHS.twine = &Suffix;
NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
if (isUnary()) {
NewLHS = LHS;
NewLHSKind = getLHSKind();
}
if (Suffix.isUnary()) {
NewRHS = Suffix.LHS;
NewRHSKind = Suffix.getLHSKind();
}
return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
}
inline Twine operator+(const Twine &LHS, const Twine &RHS) {
return LHS.concat(RHS);
}
/// Additional overload to guarantee simplified codegen; this is equivalent to
/// concat().
inline Twine operator+(const char *LHS, const StringRef &RHS) {
return Twine(LHS, RHS);
}
/// Additional overload to guarantee simplified codegen; this is equivalent to
/// concat().
inline Twine operator+(const StringRef &LHS, const char *RHS) {
return Twine(LHS, RHS);
}
inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
RHS.print(OS);
return OS;
}
/// @}
}
#endif


@@ -1,107 +0,0 @@
//===-- llvm/ADT/UniqueVector.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_UNIQUEVECTOR_H
#define LLVM_ADT_UNIQUEVECTOR_H
#include <cassert>
#include <cstddef>
#include <map>
#include <vector>
namespace llvm {
//===----------------------------------------------------------------------===//
/// UniqueVector - This class produces a sequential ID number (base 1) for each
/// unique entry that is added. T is the type of entries in the vector; T
/// should have implementations of operator== and operator<.
/// Entries can be fetched using operator[] with the entry ID.
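///
/// A minimal usage sketch (added for illustration, not part of the original
/// header):
///
/// \code
///   UniqueVector<std::string> Names;
///   unsigned A = Names.insert("foo"); // A == 1
///   unsigned B = Names.insert("bar"); // B == 2
///   unsigned C = Names.insert("foo"); // C == 1: duplicates get the same ID
///   const std::string &S = Names[B];  // "bar"
/// \endcode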
template<class T> class UniqueVector {
public:
typedef typename std::vector<T> VectorType;
typedef typename VectorType::iterator iterator;
typedef typename VectorType::const_iterator const_iterator;
private:
// Map - Used to handle the correspondence of entry to ID.
std::map<T, unsigned> Map;
// Vector - ID ordered vector of entries. Entries can be indexed by ID - 1.
//
VectorType Vector;
public:
/// insert - Append entry to the vector if it doesn't already exist. Returns
/// the entry's index + 1 to be used as a unique ID.
unsigned insert(const T &Entry) {
// Check if the entry is already in the map.
unsigned &Val = Map[Entry];
// See if entry exists, if so return prior ID.
if (Val) return Val;
// Compute ID for entry.
Val = static_cast<unsigned>(Vector.size()) + 1;
// Insert in vector.
Vector.push_back(Entry);
return Val;
}
/// idFor - return the ID for an existing entry. Returns 0 if the entry is
/// not found.
unsigned idFor(const T &Entry) const {
// Search for entry in the map.
typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry);
// See if entry exists, if so return ID.
if (MI != Map.end()) return MI->second;
// No luck.
return 0;
}
/// operator[] - Returns a reference to the entry with the specified ID.
///
const T &operator[](unsigned ID) const {
assert(ID-1 < size() && "ID is 0 or out of range!");
return Vector[ID - 1];
}
/// \brief Return an iterator to the start of the vector.
iterator begin() { return Vector.begin(); }
/// \brief Return an iterator to the start of the vector.
const_iterator begin() const { return Vector.begin(); }
/// \brief Return an iterator to the end of the vector.
iterator end() { return Vector.end(); }
/// \brief Return an iterator to the end of the vector.
const_iterator end() const { return Vector.end(); }
/// size - Returns the number of entries in the vector.
///
size_t size() const { return Vector.size(); }
/// empty - Returns true if the vector is empty.
///
bool empty() const { return Vector.empty(); }
/// reset - Clears all the entries.
///
void reset() {
Map.clear();
Vector.clear();
}
};
} // End of namespace llvm
#endif // LLVM_ADT_UNIQUEVECTOR_H


@@ -1,331 +0,0 @@
//===--- VariadicFunction.h - Variadic Functions ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements compile-time type-safe variadic functions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_VARIADICFUNCTION_H
#define LLVM_ADT_VARIADICFUNCTION_H
#include "llvm/ADT/ArrayRef.h"
namespace llvm {
// Define macros to aid in expanding a comma separated series with the index of
// the series pasted onto the last token.
#define LLVM_COMMA_JOIN1(x) x ## 0
#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1
#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2
#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3
#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4
#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5
#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6
#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7
#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8
#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9
#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10
#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11
#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12
#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13
#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14
#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15
#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16
#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17
#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18
#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19
#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20
#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21
#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22
#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23
#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24
#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25
#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26
#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27
#define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28
#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29
#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
/// \brief Class which can simulate a type-safe variadic function.
///
/// The VariadicFunction class template makes it easy to define
/// type-safe variadic functions where all arguments have the same
/// type.
///
/// Suppose we need a variadic function like this:
///
/// ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N);
///
/// Instead of many overloads of Foo(), we only need to define a helper
/// function that takes an array of arguments:
///
/// ResultT FooImpl(ArrayRef<const ArgT *> Args) {
/// // 'Args[i]' is a pointer to the i-th argument passed to Foo().
/// ...
/// }
///
/// and then define Foo() like this:
///
/// const VariadicFunction<ResultT, ArgT, FooImpl> Foo;
///
/// VariadicFunction takes care of defining the overloads of Foo().
///
/// Actually, Foo is a function object (i.e. functor) instead of a plain
/// function. This object is stateless and its constructor/destructor
/// does nothing, so it's safe to create global objects and call Foo(...) at
/// any time.
///
/// Sometimes we need a variadic function to have some fixed leading
/// arguments whose types may be different from that of the optional
/// arguments. For example:
///
/// bool FullMatch(const StringRef &S, const RE &Regex,
/// const ArgT &A_0, ..., const ArgT &A_N);
///
/// VariadicFunctionN is for such cases, where N is the number of fixed
/// arguments. It is like VariadicFunction, except that it takes N more
/// template arguments for the types of the fixed arguments:
///
/// bool FullMatchImpl(const StringRef &S, const RE &Regex,
/// ArrayRef<const ArgT *> Args) { ... }
/// const VariadicFunction2<bool, const StringRef&,
/// const RE&, ArgT, FullMatchImpl>
/// FullMatch;
///
/// Currently VariadicFunction and friends support up to 3
/// fixed leading arguments and up to 32 optional arguments.
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
struct VariadicFunction {
ResultT operator()() const {
return Func(None);
}
#define LLVM_DEFINE_OVERLOAD(N) \
ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
return Func(makeArrayRef(Args)); \
}
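// For N == 2 this macro expands to:
//
//   ResultT operator()(const ArgT &A0, const ArgT &A1) const {
//     const ArgT *const Args[] = { &A0, &A1 };
//     return Func(makeArrayRef(Args));
//   }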
LLVM_DEFINE_OVERLOAD(1)
LLVM_DEFINE_OVERLOAD(2)
LLVM_DEFINE_OVERLOAD(3)
LLVM_DEFINE_OVERLOAD(4)
LLVM_DEFINE_OVERLOAD(5)
LLVM_DEFINE_OVERLOAD(6)
LLVM_DEFINE_OVERLOAD(7)
LLVM_DEFINE_OVERLOAD(8)
LLVM_DEFINE_OVERLOAD(9)
LLVM_DEFINE_OVERLOAD(10)
LLVM_DEFINE_OVERLOAD(11)
LLVM_DEFINE_OVERLOAD(12)
LLVM_DEFINE_OVERLOAD(13)
LLVM_DEFINE_OVERLOAD(14)
LLVM_DEFINE_OVERLOAD(15)
LLVM_DEFINE_OVERLOAD(16)
LLVM_DEFINE_OVERLOAD(17)
LLVM_DEFINE_OVERLOAD(18)
LLVM_DEFINE_OVERLOAD(19)
LLVM_DEFINE_OVERLOAD(20)
LLVM_DEFINE_OVERLOAD(21)
LLVM_DEFINE_OVERLOAD(22)
LLVM_DEFINE_OVERLOAD(23)
LLVM_DEFINE_OVERLOAD(24)
LLVM_DEFINE_OVERLOAD(25)
LLVM_DEFINE_OVERLOAD(26)
LLVM_DEFINE_OVERLOAD(27)
LLVM_DEFINE_OVERLOAD(28)
LLVM_DEFINE_OVERLOAD(29)
LLVM_DEFINE_OVERLOAD(30)
LLVM_DEFINE_OVERLOAD(31)
LLVM_DEFINE_OVERLOAD(32)
#undef LLVM_DEFINE_OVERLOAD
};
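// A minimal usage sketch (StrCatImpl and StrCat are hypothetical names, not
// part of this header; assumes <string> is available):
//
//   std::string StrCatImpl(ArrayRef<const std::string *> Args) {
//     std::string S;
//     for (const std::string *A : Args)
//       S += *A;
//     return S;
//   }
//   const VariadicFunction<std::string, std::string, StrCatImpl> StrCat = {};
//
//   // StrCat(std::string("a"), std::string("b")) yields "ab".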
template <typename ResultT, typename Param0T, typename ArgT,
ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)>
struct VariadicFunction1 {
ResultT operator()(Param0T P0) const {
return Func(P0, None);
}
#define LLVM_DEFINE_OVERLOAD(N) \
ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
return Func(P0, makeArrayRef(Args)); \
}
LLVM_DEFINE_OVERLOAD(1)
LLVM_DEFINE_OVERLOAD(2)
LLVM_DEFINE_OVERLOAD(3)
LLVM_DEFINE_OVERLOAD(4)
LLVM_DEFINE_OVERLOAD(5)
LLVM_DEFINE_OVERLOAD(6)
LLVM_DEFINE_OVERLOAD(7)
LLVM_DEFINE_OVERLOAD(8)
LLVM_DEFINE_OVERLOAD(9)
LLVM_DEFINE_OVERLOAD(10)
LLVM_DEFINE_OVERLOAD(11)
LLVM_DEFINE_OVERLOAD(12)
LLVM_DEFINE_OVERLOAD(13)
LLVM_DEFINE_OVERLOAD(14)
LLVM_DEFINE_OVERLOAD(15)
LLVM_DEFINE_OVERLOAD(16)
LLVM_DEFINE_OVERLOAD(17)
LLVM_DEFINE_OVERLOAD(18)
LLVM_DEFINE_OVERLOAD(19)
LLVM_DEFINE_OVERLOAD(20)
LLVM_DEFINE_OVERLOAD(21)
LLVM_DEFINE_OVERLOAD(22)
LLVM_DEFINE_OVERLOAD(23)
LLVM_DEFINE_OVERLOAD(24)
LLVM_DEFINE_OVERLOAD(25)
LLVM_DEFINE_OVERLOAD(26)
LLVM_DEFINE_OVERLOAD(27)
LLVM_DEFINE_OVERLOAD(28)
LLVM_DEFINE_OVERLOAD(29)
LLVM_DEFINE_OVERLOAD(30)
LLVM_DEFINE_OVERLOAD(31)
LLVM_DEFINE_OVERLOAD(32)
#undef LLVM_DEFINE_OVERLOAD
};
template <typename ResultT, typename Param0T, typename Param1T, typename ArgT,
ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)>
struct VariadicFunction2 {
ResultT operator()(Param0T P0, Param1T P1) const {
return Func(P0, P1, None);
}
#define LLVM_DEFINE_OVERLOAD(N) \
ResultT operator()(Param0T P0, Param1T P1, \
LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
return Func(P0, P1, makeArrayRef(Args)); \
}
LLVM_DEFINE_OVERLOAD(1)
LLVM_DEFINE_OVERLOAD(2)
LLVM_DEFINE_OVERLOAD(3)
LLVM_DEFINE_OVERLOAD(4)
LLVM_DEFINE_OVERLOAD(5)
LLVM_DEFINE_OVERLOAD(6)
LLVM_DEFINE_OVERLOAD(7)
LLVM_DEFINE_OVERLOAD(8)
LLVM_DEFINE_OVERLOAD(9)
LLVM_DEFINE_OVERLOAD(10)
LLVM_DEFINE_OVERLOAD(11)
LLVM_DEFINE_OVERLOAD(12)
LLVM_DEFINE_OVERLOAD(13)
LLVM_DEFINE_OVERLOAD(14)
LLVM_DEFINE_OVERLOAD(15)
LLVM_DEFINE_OVERLOAD(16)
LLVM_DEFINE_OVERLOAD(17)
LLVM_DEFINE_OVERLOAD(18)
LLVM_DEFINE_OVERLOAD(19)
LLVM_DEFINE_OVERLOAD(20)
LLVM_DEFINE_OVERLOAD(21)
LLVM_DEFINE_OVERLOAD(22)
LLVM_DEFINE_OVERLOAD(23)
LLVM_DEFINE_OVERLOAD(24)
LLVM_DEFINE_OVERLOAD(25)
LLVM_DEFINE_OVERLOAD(26)
LLVM_DEFINE_OVERLOAD(27)
LLVM_DEFINE_OVERLOAD(28)
LLVM_DEFINE_OVERLOAD(29)
LLVM_DEFINE_OVERLOAD(30)
LLVM_DEFINE_OVERLOAD(31)
LLVM_DEFINE_OVERLOAD(32)
#undef LLVM_DEFINE_OVERLOAD
};
template <typename ResultT, typename Param0T, typename Param1T,
typename Param2T, typename ArgT,
ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)>
struct VariadicFunction3 {
ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const {
return Func(P0, P1, P2, None);
}
#define LLVM_DEFINE_OVERLOAD(N) \
ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \
LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
return Func(P0, P1, P2, makeArrayRef(Args)); \
}
LLVM_DEFINE_OVERLOAD(1)
LLVM_DEFINE_OVERLOAD(2)
LLVM_DEFINE_OVERLOAD(3)
LLVM_DEFINE_OVERLOAD(4)
LLVM_DEFINE_OVERLOAD(5)
LLVM_DEFINE_OVERLOAD(6)
LLVM_DEFINE_OVERLOAD(7)
LLVM_DEFINE_OVERLOAD(8)
LLVM_DEFINE_OVERLOAD(9)
LLVM_DEFINE_OVERLOAD(10)
LLVM_DEFINE_OVERLOAD(11)
LLVM_DEFINE_OVERLOAD(12)
LLVM_DEFINE_OVERLOAD(13)
LLVM_DEFINE_OVERLOAD(14)
LLVM_DEFINE_OVERLOAD(15)
LLVM_DEFINE_OVERLOAD(16)
LLVM_DEFINE_OVERLOAD(17)
LLVM_DEFINE_OVERLOAD(18)
LLVM_DEFINE_OVERLOAD(19)
LLVM_DEFINE_OVERLOAD(20)
LLVM_DEFINE_OVERLOAD(21)
LLVM_DEFINE_OVERLOAD(22)
LLVM_DEFINE_OVERLOAD(23)
LLVM_DEFINE_OVERLOAD(24)
LLVM_DEFINE_OVERLOAD(25)
LLVM_DEFINE_OVERLOAD(26)
LLVM_DEFINE_OVERLOAD(27)
LLVM_DEFINE_OVERLOAD(28)
LLVM_DEFINE_OVERLOAD(29)
LLVM_DEFINE_OVERLOAD(30)
LLVM_DEFINE_OVERLOAD(31)
LLVM_DEFINE_OVERLOAD(32)
#undef LLVM_DEFINE_OVERLOAD
};
// Cleanup the macro namespace.
#undef LLVM_COMMA_JOIN1
#undef LLVM_COMMA_JOIN2
#undef LLVM_COMMA_JOIN3
#undef LLVM_COMMA_JOIN4
#undef LLVM_COMMA_JOIN5
#undef LLVM_COMMA_JOIN6
#undef LLVM_COMMA_JOIN7
#undef LLVM_COMMA_JOIN8
#undef LLVM_COMMA_JOIN9
#undef LLVM_COMMA_JOIN10
#undef LLVM_COMMA_JOIN11
#undef LLVM_COMMA_JOIN12
#undef LLVM_COMMA_JOIN13
#undef LLVM_COMMA_JOIN14
#undef LLVM_COMMA_JOIN15
#undef LLVM_COMMA_JOIN16
#undef LLVM_COMMA_JOIN17
#undef LLVM_COMMA_JOIN18
#undef LLVM_COMMA_JOIN19
#undef LLVM_COMMA_JOIN20
#undef LLVM_COMMA_JOIN21
#undef LLVM_COMMA_JOIN22
#undef LLVM_COMMA_JOIN23
#undef LLVM_COMMA_JOIN24
#undef LLVM_COMMA_JOIN25
#undef LLVM_COMMA_JOIN26
#undef LLVM_COMMA_JOIN27
#undef LLVM_COMMA_JOIN28
#undef LLVM_COMMA_JOIN29
#undef LLVM_COMMA_JOIN30
#undef LLVM_COMMA_JOIN31
#undef LLVM_COMMA_JOIN32
} // end namespace llvm
#endif // LLVM_ADT_VARIADICFUNCTION_H

View File

@ -1,103 +0,0 @@
//===-- llvm/ADT/edit_distance.h - Array edit distance function -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a Levenshtein distance function that works for any two
// sequences, with each element of each sequence being analogous to a character
// in a string.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_EDIT_DISTANCE_H
#define LLVM_ADT_EDIT_DISTANCE_H
#include "llvm/ADT/ArrayRef.h"
#include <algorithm>
#include <memory>
namespace llvm {
/// \brief Determine the edit distance between two sequences.
///
/// \param FromArray the first sequence to compare.
///
/// \param ToArray the second sequence to compare.
///
/// \param AllowReplacements whether to allow element replacements (change one
/// element into another) as a single operation, rather than as two operations
/// (an insertion and a removal).
///
/// \param MaxEditDistance If non-zero, the maximum edit distance that this
/// routine is allowed to compute. If the edit distance will exceed that
/// maximum, returns \c MaxEditDistance+1.
///
/// \returns the minimum number of element insertions, removals, or (if
/// \p AllowReplacements is \c true) replacements needed to transform one of
/// the given sequences into the other. If zero, the sequences are identical.
template<typename T>
unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
bool AllowReplacements = true,
unsigned MaxEditDistance = 0) {
// The algorithm implemented below is the "classic"
// dynamic-programming algorithm for computing the Levenshtein
// distance, which is described here:
//
// http://en.wikipedia.org/wiki/Levenshtein_distance
//
// Although the algorithm is typically described using an m x n
// array, only one row plus one element are used at a time, so this
// implementation just keeps one vector for the row. To update one entry,
// only the entries to the left, top, and top-left are needed. The left
// entry is in Row[x-1], the top entry is what's in Row[x] from the last
// iteration, and the top-left entry is stored in Previous.
typename ArrayRef<T>::size_type m = FromArray.size();
typename ArrayRef<T>::size_type n = ToArray.size();
const unsigned SmallBufferSize = 64;
unsigned SmallBuffer[SmallBufferSize];
std::unique_ptr<unsigned[]> Allocated;
unsigned *Row = SmallBuffer;
if (n + 1 > SmallBufferSize) {
Row = new unsigned[n + 1];
Allocated.reset(Row);
}
for (unsigned i = 1; i <= n; ++i)
Row[i] = i;
for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
Row[0] = y;
unsigned BestThisRow = Row[0];
unsigned Previous = y - 1;
for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
int OldRow = Row[x];
if (AllowReplacements) {
Row[x] = std::min(
Previous + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u),
std::min(Row[x-1], Row[x])+1);
}
else {
if (FromArray[y-1] == ToArray[x-1]) Row[x] = Previous;
else Row[x] = std::min(Row[x-1], Row[x]) + 1;
}
Previous = OldRow;
BestThisRow = std::min(BestThisRow, Row[x]);
}
if (MaxEditDistance && BestThisRow > MaxEditDistance)
return MaxEditDistance + 1;
}
unsigned Result = Row[n];
return Result;
}
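// Illustrative use, computing the classic "kitten" -> "sitting" distance of 3
// over characters:
//
//   ArrayRef<char> From("kitten", 6), To("sitting", 7);
//   unsigned D = ComputeEditDistance(From, To); // D == 3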
} // End llvm namespace
#endif

View File

@ -1,752 +0,0 @@
//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines classes to implement an intrusive doubly linked list class
// (i.e. each node of the list must contain next and previous fields for the
// list).
//
// The ilist_traits trait class is used to gain access to the next and previous
// fields of the node type that the list is instantiated with. If it is not
// specialized, the list defaults to using the getPrev(), getNext() method calls
// to get the next and previous pointers.
//
// The ilist class itself should be a drop-in replacement for std::list, assuming
// that the nodes contain next/prev pointers. This list replacement does not
// provide a constant time size() method, so be careful to use empty() when you
// really want to know if it's empty.
//
// The ilist class is implemented by allocating a 'tail' node when the list is
// created (using ilist_traits<>::createSentinel()). This tail node is
// absolutely required because the user must be able to compute end()-1. Because
// of this, users of the direct next/prev links will see an extra link on the
// end of the list, which should be ignored.
//
// Requirements for a user of this list:
//
// 1. The user must provide {g|s}et{Next|Prev} methods, or specialize
// ilist_traits to provide an alternate way of getting and setting next and
// prev links.
//
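// For example (an illustrative sketch, not part of this header), a minimal
// node type usable with the default traits derives from ilist_node<> (see
// llvm/ADT/ilist_node.h):
//
//   struct MyNode : ilist_node<MyNode> {
//     int Value = 0;
//     MyNode() = default; // The default sentinel traits require this.
//     explicit MyNode(int V) : Value(V) {}
//   };
//
//   ilist<MyNode> List;
//   List.push_back(new MyNode(42)); // The list takes ownership of the node.
//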
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ILIST_H
#define LLVM_ADT_ILIST_H
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
namespace llvm {
template<typename NodeTy, typename Traits> class iplist;
template<typename NodeTy> class ilist_iterator;
/// ilist_nextprev_traits - A fragment for template traits for intrusive list
/// that provides default next/prev implementations for common operations.
///
template<typename NodeTy>
struct ilist_nextprev_traits {
static NodeTy *getPrev(NodeTy *N) { return N->getPrev(); }
static NodeTy *getNext(NodeTy *N) { return N->getNext(); }
static const NodeTy *getPrev(const NodeTy *N) { return N->getPrev(); }
static const NodeTy *getNext(const NodeTy *N) { return N->getNext(); }
static void setPrev(NodeTy *N, NodeTy *Prev) { N->setPrev(Prev); }
static void setNext(NodeTy *N, NodeTy *Next) { N->setNext(Next); }
};
template<typename NodeTy>
struct ilist_traits;
/// ilist_sentinel_traits - A fragment for template traits for intrusive list
/// that provides default sentinel implementations for common operations.
///
/// ilist_sentinel_traits implements a lazy dynamic sentinel allocation
/// strategy. The sentinel is stored in the prev field of ilist's Head.
///
template<typename NodeTy>
struct ilist_sentinel_traits {
/// createSentinel - create the dynamic sentinel
static NodeTy *createSentinel() { return new NodeTy(); }
/// destroySentinel - deallocate the dynamic sentinel
static void destroySentinel(NodeTy *N) { delete N; }
/// provideInitialHead - when constructing an ilist, provide a starting
/// value for its Head
/// @return null node to indicate that it needs to be allocated later
static NodeTy *provideInitialHead() { return nullptr; }
/// ensureHead - make sure that Head is either already
/// initialized or assigned a fresh sentinel
/// @return the sentinel
static NodeTy *ensureHead(NodeTy *&Head) {
if (!Head) {
Head = ilist_traits<NodeTy>::createSentinel();
ilist_traits<NodeTy>::noteHead(Head, Head);
ilist_traits<NodeTy>::setNext(Head, nullptr);
return Head;
}
return ilist_traits<NodeTy>::getPrev(Head);
}
/// noteHead - stash the sentinel into its default location
static void noteHead(NodeTy *NewHead, NodeTy *Sentinel) {
ilist_traits<NodeTy>::setPrev(NewHead, Sentinel);
}
};
template <typename NodeTy> class ilist_half_node;
template <typename NodeTy> class ilist_node;
/// Traits with an embedded ilist_node as a sentinel.
///
/// FIXME: The downcast in createSentinel() is UB.
template <typename NodeTy> struct ilist_embedded_sentinel_traits {
/// Get hold of the node that marks the end of the list.
NodeTy *createSentinel() const {
// Since i(p)lists always publicly derive from their corresponding traits,
// placing a data member in this class will augment the i(p)list. But since
// the NodeTy is expected to publicly derive from ilist_node<NodeTy>,
// there is a legal viable downcast from it to NodeTy. We use this trick to
// superimpose an i(p)list with a "ghostly" NodeTy, which becomes the
// sentinel. Dereferencing the sentinel is forbidden (save the
// ilist_node<NodeTy>), so no one will ever notice the superposition.
return static_cast<NodeTy *>(&Sentinel);
}
static void destroySentinel(NodeTy *) {}
NodeTy *provideInitialHead() const { return createSentinel(); }
NodeTy *ensureHead(NodeTy *) const { return createSentinel(); }
static void noteHead(NodeTy *, NodeTy *) {}
private:
mutable ilist_node<NodeTy> Sentinel;
};
/// Traits with an embedded ilist_half_node as a sentinel.
///
/// FIXME: The downcast in createSentinel() is UB.
template <typename NodeTy> struct ilist_half_embedded_sentinel_traits {
/// Get hold of the node that marks the end of the list.
NodeTy *createSentinel() const {
// See comment in ilist_embedded_sentinel_traits::createSentinel().
return static_cast<NodeTy *>(&Sentinel);
}
static void destroySentinel(NodeTy *) {}
NodeTy *provideInitialHead() const { return createSentinel(); }
NodeTy *ensureHead(NodeTy *) const { return createSentinel(); }
static void noteHead(NodeTy *, NodeTy *) {}
private:
mutable ilist_half_node<NodeTy> Sentinel;
};
/// ilist_node_traits - A fragment for template traits for intrusive list
/// that provides default node related operations.
///
template<typename NodeTy>
struct ilist_node_traits {
static NodeTy *createNode(const NodeTy &V) { return new NodeTy(V); }
static void deleteNode(NodeTy *V) { delete V; }
void addNodeToList(NodeTy *) {}
void removeNodeFromList(NodeTy *) {}
void transferNodesFromList(ilist_node_traits & /*SrcTraits*/,
ilist_iterator<NodeTy> /*first*/,
ilist_iterator<NodeTy> /*last*/) {}
};
/// ilist_default_traits - Default template traits for intrusive list.
/// By inheriting from this, you can easily use default implementations
/// for all common operations.
///
template<typename NodeTy>
struct ilist_default_traits : public ilist_nextprev_traits<NodeTy>,
public ilist_sentinel_traits<NodeTy>,
public ilist_node_traits<NodeTy> {
};
// Template traits for intrusive list. By specializing this template class, you
// can change what next/prev fields are used to store the links...
template<typename NodeTy>
struct ilist_traits : public ilist_default_traits<NodeTy> {};
// Const traits are the same as nonconst traits...
template<typename Ty>
struct ilist_traits<const Ty> : public ilist_traits<Ty> {};
//===----------------------------------------------------------------------===//
// Iterator for intrusive list.
//
template <typename NodeTy>
class ilist_iterator
: public std::iterator<std::bidirectional_iterator_tag, NodeTy, ptrdiff_t> {
public:
typedef ilist_traits<NodeTy> Traits;
typedef std::iterator<std::bidirectional_iterator_tag, NodeTy, ptrdiff_t>
super;
typedef typename super::value_type value_type;
typedef typename super::difference_type difference_type;
typedef typename super::pointer pointer;
typedef typename super::reference reference;
private:
pointer NodePtr;
public:
explicit ilist_iterator(pointer NP) : NodePtr(NP) {}
explicit ilist_iterator(reference NR) : NodePtr(&NR) {}
ilist_iterator() : NodePtr(nullptr) {}
// This is templated so that we can allow constructing a const iterator from
// a nonconst iterator...
template <class node_ty>
ilist_iterator(const ilist_iterator<node_ty> &RHS)
: NodePtr(RHS.getNodePtrUnchecked()) {}
// This is templated so that we can allow assigning to a const iterator from
// a nonconst iterator...
template <class node_ty>
const ilist_iterator &operator=(const ilist_iterator<node_ty> &RHS) {
NodePtr = RHS.getNodePtrUnchecked();
return *this;
}
void reset(pointer NP) { NodePtr = NP; }
// Accessors...
explicit operator pointer() const { return NodePtr; }
reference operator*() const { return *NodePtr; }
pointer operator->() const { return &operator*(); }
// Comparison operators
template <class Y> bool operator==(const ilist_iterator<Y> &RHS) const {
return NodePtr == RHS.getNodePtrUnchecked();
}
template <class Y> bool operator!=(const ilist_iterator<Y> &RHS) const {
return NodePtr != RHS.getNodePtrUnchecked();
}
// Increment and decrement operators...
ilist_iterator &operator--() {
NodePtr = Traits::getPrev(NodePtr);
assert(NodePtr && "--'d off the beginning of an ilist!");
return *this;
}
ilist_iterator &operator++() {
NodePtr = Traits::getNext(NodePtr);
return *this;
}
ilist_iterator operator--(int) {
ilist_iterator tmp = *this;
--*this;
return tmp;
}
ilist_iterator operator++(int) {
ilist_iterator tmp = *this;
++*this;
return tmp;
}
// Internal interface, do not use...
pointer getNodePtrUnchecked() const { return NodePtr; }
};
// Allow ilist_iterators to convert into pointers to a node automatically when
// used by the dyn_cast, cast, isa mechanisms...
template<typename From> struct simplify_type;
template<typename NodeTy> struct simplify_type<ilist_iterator<NodeTy> > {
typedef NodeTy* SimpleType;
static SimpleType getSimplifiedValue(ilist_iterator<NodeTy> &Node) {
return &*Node;
}
};
template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > {
typedef /*const*/ NodeTy* SimpleType;
static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) {
return &*Node;
}
};
//===----------------------------------------------------------------------===//
//
/// iplist - The subset of list functionality that can safely be used on nodes
/// of polymorphic types, i.e. a heterogeneous list with a common base class that
/// holds the next/prev pointers. The only state of the list itself is a single
/// pointer to the head of the list.
///
/// This list can be in one of three interesting states:
/// 1. The list may be completely unconstructed. In this case, the head
/// pointer is null. When in this form, any query for an iterator (e.g.
/// begin() or end()) causes the list to transparently change to state #2.
/// 2. The list may be empty, but contain a sentinel for the end iterator. This
/// sentinel is created by the Traits::createSentinel method and is a link
/// in the list. When the list is empty, the pointer in the iplist points
/// to the sentinel. Once the sentinel is constructed, it
/// is not destroyed until the list is.
/// 3. The list may contain actual objects in it, which are stored as a doubly
/// linked list of nodes. One invariant of the list is that the predecessor
/// of the first node in the list always points to the last node in the list,
/// and the successor pointer for the sentinel (which always stays at the
/// end of the list) is always null.
///
template<typename NodeTy, typename Traits=ilist_traits<NodeTy> >
class iplist : public Traits {
mutable NodeTy *Head;
// Use the prev node pointer of 'head' as the tail pointer. This is really a
// circularly linked list where we snip the 'next' link from the sentinel node
// back to the first node in the list (to preserve assertions about going off
// the end of the list).
NodeTy *getTail() { return this->ensureHead(Head); }
const NodeTy *getTail() const { return this->ensureHead(Head); }
void setTail(NodeTy *N) const { this->noteHead(Head, N); }
/// CreateLazySentinel - This method verifies whether the sentinel for the
/// list has been created and lazily makes it if not.
void CreateLazySentinel() const {
this->ensureHead(Head);
}
static bool op_less(NodeTy &L, NodeTy &R) { return L < R; }
static bool op_equal(NodeTy &L, NodeTy &R) { return L == R; }
// No fundamental reason why iplist can't be copyable, but the default
// copy/copy-assign won't do.
iplist(const iplist &) = delete;
void operator=(const iplist &) = delete;
public:
typedef NodeTy *pointer;
typedef const NodeTy *const_pointer;
typedef NodeTy &reference;
typedef const NodeTy &const_reference;
typedef NodeTy value_type;
typedef ilist_iterator<NodeTy> iterator;
typedef ilist_iterator<const NodeTy> const_iterator;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
iplist() : Head(this->provideInitialHead()) {}
~iplist() {
if (!Head) return;
clear();
Traits::destroySentinel(getTail());
}
// Iterator creation methods.
iterator begin() {
CreateLazySentinel();
return iterator(Head);
}
const_iterator begin() const {
CreateLazySentinel();
return const_iterator(Head);
}
iterator end() {
CreateLazySentinel();
return iterator(getTail());
}
const_iterator end() const {
CreateLazySentinel();
return const_iterator(getTail());
}
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
// Miscellaneous inspection routines.
size_type max_size() const { return size_type(-1); }
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
return !Head || Head == getTail();
}
// Front and back accessor functions...
reference front() {
assert(!empty() && "Called front() on empty list!");
return *Head;
}
const_reference front() const {
assert(!empty() && "Called front() on empty list!");
return *Head;
}
reference back() {
assert(!empty() && "Called back() on empty list!");
return *this->getPrev(getTail());
}
const_reference back() const {
assert(!empty() && "Called back() on empty list!");
return *this->getPrev(getTail());
}
void swap(iplist &RHS) {
assert(0 && "Swap does not use list traits callback correctly yet!");
std::swap(Head, RHS.Head);
}
iterator insert(iterator where, NodeTy *New) {
NodeTy *CurNode = where.getNodePtrUnchecked();
NodeTy *PrevNode = this->getPrev(CurNode);
this->setNext(New, CurNode);
this->setPrev(New, PrevNode);
if (CurNode != Head) // Is PrevNode off the beginning of the list?
this->setNext(PrevNode, New);
else
Head = New;
this->setPrev(CurNode, New);
this->addNodeToList(New); // Notify traits that we added a node...
return iterator(New);
}
iterator insert(iterator where, const NodeTy &New) {
return this->insert(where, new NodeTy(New));
}
iterator insertAfter(iterator where, NodeTy *New) {
if (empty())
return insert(begin(), New);
else
return insert(++where, New);
}
NodeTy *remove(iterator &IT) {
assert(IT != end() && "Cannot remove end of list!");
NodeTy *Node = &*IT;
NodeTy *NextNode = this->getNext(Node);
NodeTy *PrevNode = this->getPrev(Node);
if (Node != Head) // Is PrevNode off the beginning of the list?
this->setNext(PrevNode, NextNode);
else
Head = NextNode;
this->setPrev(NextNode, PrevNode);
IT.reset(NextNode);
this->removeNodeFromList(Node); // Notify traits that we removed a node...
// Set the next/prev pointers of the current node to null. This isn't
// strictly required, but this catches errors where a node is removed from
// an ilist (and potentially deleted) with iterators still pointing at it.
// When those iterators are incremented or decremented, they will assert on
// the null next/prev pointer instead of "usually working".
this->setNext(Node, nullptr);
this->setPrev(Node, nullptr);
return Node;
}
NodeTy *remove(const iterator &IT) {
iterator MutIt = IT;
return remove(MutIt);
}
NodeTy *remove(NodeTy *IT) { return remove(iterator(IT)); }
NodeTy *remove(NodeTy &IT) { return remove(iterator(IT)); }
// erase - remove a node from the controlled sequence... and delete it.
iterator erase(iterator where) {
this->deleteNode(remove(where));
return where;
}
iterator erase(NodeTy *IT) { return erase(iterator(IT)); }
iterator erase(NodeTy &IT) { return erase(iterator(IT)); }
/// Remove all nodes from the list like clear(), but do not call
/// removeNodeFromList() or deleteNode().
///
/// This should only be used immediately before freeing nodes in bulk to
/// avoid traversing the list and bringing all the nodes into cache.
void clearAndLeakNodesUnsafely() {
if (Head) {
Head = getTail();
this->setPrev(Head, Head);
}
}
private:
// transfer - The heart of the splice function. Move linked list nodes from
// [first, last) into position.
//
void transfer(iterator position, iplist &L2, iterator first, iterator last) {
assert(first != last && "Should be checked by callers");
// Position cannot be contained in the range to be transferred.
// Check for the most common mistake.
assert(position != first &&
"Insertion point can't be one of the transferred nodes");
if (position != last) {
// Note: we have to be careful about the case when we move the first node
// in the list. This node is the list sentinel node and we can't move it.
NodeTy *ThisSentinel = getTail();
setTail(nullptr);
NodeTy *L2Sentinel = L2.getTail();
L2.setTail(nullptr);
// Remove [first, last) from its old position.
NodeTy *First = &*first, *Prev = this->getPrev(First);
NodeTy *Next = last.getNodePtrUnchecked(), *Last = this->getPrev(Next);
if (Prev)
this->setNext(Prev, Next);
else
L2.Head = Next;
this->setPrev(Next, Prev);
// Splice [first, last) into its new position.
NodeTy *PosNext = position.getNodePtrUnchecked();
NodeTy *PosPrev = this->getPrev(PosNext);
// Fix head of list...
if (PosPrev)
this->setNext(PosPrev, First);
else
Head = First;
this->setPrev(First, PosPrev);
// Fix end of list...
this->setNext(Last, PosNext);
this->setPrev(PosNext, Last);
this->transferNodesFromList(L2, iterator(First), iterator(PosNext));
// Now that everything is set, restore the pointers to the list sentinels.
L2.setTail(L2Sentinel);
setTail(ThisSentinel);
}
}
public:
//===----------------------------------------------------------------------===
// Functionality derived from other functions defined above...
//
size_type LLVM_ATTRIBUTE_UNUSED_RESULT size() const {
if (!Head) return 0; // Don't require construction of sentinel if empty.
return std::distance(begin(), end());
}
iterator erase(iterator first, iterator last) {
while (first != last)
first = erase(first);
return last;
}
void clear() { if (Head) erase(begin(), end()); }
// Front and back inserters...
void push_front(NodeTy *val) { insert(begin(), val); }
void push_back(NodeTy *val) { insert(end(), val); }
void pop_front() {
assert(!empty() && "pop_front() on empty list!");
erase(begin());
}
void pop_back() {
assert(!empty() && "pop_back() on empty list!");
iterator t = end(); erase(--t);
}
// Special forms of insert...
template<class InIt> void insert(iterator where, InIt first, InIt last) {
for (; first != last; ++first) insert(where, *first);
}
// Splice members - defined in terms of transfer...
void splice(iterator where, iplist &L2) {
if (!L2.empty())
transfer(where, L2, L2.begin(), L2.end());
}
void splice(iterator where, iplist &L2, iterator first) {
iterator last = first; ++last;
if (where == first || where == last) return; // No change
transfer(where, L2, first, last);
}
void splice(iterator where, iplist &L2, iterator first, iterator last) {
if (first != last) transfer(where, L2, first, last);
}
void splice(iterator where, iplist &L2, NodeTy &N) {
splice(where, L2, iterator(N));
}
void splice(iterator where, iplist &L2, NodeTy *N) {
splice(where, L2, iterator(N));
}
template <class Compare>
void merge(iplist &Right, Compare comp) {
if (this == &Right)
return;
iterator First1 = begin(), Last1 = end();
iterator First2 = Right.begin(), Last2 = Right.end();
while (First1 != Last1 && First2 != Last2) {
if (comp(*First2, *First1)) {
iterator Next = First2;
transfer(First1, Right, First2, ++Next);
First2 = Next;
} else {
++First1;
}
}
if (First2 != Last2)
transfer(Last1, Right, First2, Last2);
}
void merge(iplist &Right) { return merge(Right, op_less); }
template <class Compare>
void sort(Compare comp) {
// The list is empty, vacuously sorted.
if (empty())
return;
// The list has a single element, vacuously sorted.
if (std::next(begin()) == end())
return;
// Find the split point for the list.
iterator Center = begin(), End = begin();
while (End != end() && std::next(End) != end()) {
Center = std::next(Center);
End = std::next(std::next(End));
}
// Split the list into two.
iplist RightHalf;
RightHalf.splice(RightHalf.begin(), *this, Center, end());
// Sort the two sublists.
sort(comp);
RightHalf.sort(comp);
// Merge the two sublists back together.
merge(RightHalf, comp);
}
void sort() { sort(op_less); }
/// \brief Get the previous node, or \c nullptr for the list head.
NodeTy *getPrevNode(NodeTy &N) const {
auto I = N.getIterator();
if (I == begin())
return nullptr;
return &*std::prev(I);
}
/// \brief Get the previous node, or \c nullptr for the list head.
const NodeTy *getPrevNode(const NodeTy &N) const {
return getPrevNode(const_cast<NodeTy &>(N));
}
/// \brief Get the next node, or \c nullptr for the list tail.
NodeTy *getNextNode(NodeTy &N) const {
auto Next = std::next(N.getIterator());
if (Next == end())
return nullptr;
return &*Next;
}
/// \brief Get the next node, or \c nullptr for the list tail.
const NodeTy *getNextNode(const NodeTy &N) const {
return getNextNode(const_cast<NodeTy &>(N));
}
};
template<typename NodeTy>
struct ilist : public iplist<NodeTy> {
typedef typename iplist<NodeTy>::size_type size_type;
typedef typename iplist<NodeTy>::iterator iterator;
ilist() {}
ilist(const ilist &right) : iplist<NodeTy>() {
insert(this->begin(), right.begin(), right.end());
}
explicit ilist(size_type count) {
insert(this->begin(), count, NodeTy());
}
ilist(size_type count, const NodeTy &val) {
insert(this->begin(), count, val);
}
template<class InIt> ilist(InIt first, InIt last) {
insert(this->begin(), first, last);
}
// bring hidden functions into scope
using iplist<NodeTy>::insert;
using iplist<NodeTy>::push_front;
using iplist<NodeTy>::push_back;
// Main implementation here - Insert for a node passed by value...
iterator insert(iterator where, const NodeTy &val) {
return insert(where, this->createNode(val));
}
// Front and back inserters...
void push_front(const NodeTy &val) { insert(this->begin(), val); }
void push_back(const NodeTy &val) { insert(this->end(), val); }
void insert(iterator where, size_type count, const NodeTy &val) {
for (; count != 0; --count) insert(where, val);
}
// Assign special forms...
void assign(size_type count, const NodeTy &val) {
iterator I = this->begin();
for (; I != this->end() && count != 0; ++I, --count)
*I = val;
if (count != 0)
insert(this->end(), count, val);
else
erase(I, this->end());
}
template<class InIt> void assign(InIt first1, InIt last1) {
iterator first2 = this->begin(), last2 = this->end();
for ( ; first1 != last1 && first2 != last2; ++first1, ++first2)
*first2 = *first1;
// Copy the input range onto the existing nodes, then either erase the
// leftover nodes or append the remaining input at the end.
if (first1 == last1)
erase(first2, last2);
else
insert(last2, first1, last1);
}
// Resize members...
void resize(size_type newsize, NodeTy val) {
iterator i = this->begin();
size_type len = 0;
for ( ; i != this->end() && len < newsize; ++i, ++len) /* empty*/ ;
if (len == newsize)
erase(i, this->end());
else // i == end()
insert(this->end(), newsize - len, val);
}
void resize(size_type newsize) { resize(newsize, NodeTy()); }
};
} // End llvm namespace
namespace std {
// Ensure that swap uses the fast list swap...
template<class Ty>
void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
Left.swap(Right);
}
} // End 'std' extensions...
#endif // LLVM_ADT_ILIST_H

View File

@ -1,123 +0,0 @@
//==-- llvm/ADT/ilist_node.h - Intrusive Linked List Helper ------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ilist_node class template, which is a convenient
// base class for creating classes that can be used with ilists.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ILIST_NODE_H
#define LLVM_ADT_ILIST_NODE_H
namespace llvm {
template<typename NodeTy>
struct ilist_traits;
template <typename NodeTy> struct ilist_embedded_sentinel_traits;
template <typename NodeTy> struct ilist_half_embedded_sentinel_traits;
/// ilist_half_node - Base class that provides prev services for sentinels.
///
template<typename NodeTy>
class ilist_half_node {
friend struct ilist_traits<NodeTy>;
friend struct ilist_half_embedded_sentinel_traits<NodeTy>;
NodeTy *Prev;
protected:
NodeTy *getPrev() { return Prev; }
const NodeTy *getPrev() const { return Prev; }
void setPrev(NodeTy *P) { Prev = P; }
ilist_half_node() : Prev(nullptr) {}
};
template<typename NodeTy>
struct ilist_nextprev_traits;
template <typename NodeTy> class ilist_iterator;
/// ilist_node - Base class that provides next/prev services for nodes
/// that use ilist_nextprev_traits or ilist_default_traits.
///
template<typename NodeTy>
class ilist_node : private ilist_half_node<NodeTy> {
friend struct ilist_nextprev_traits<NodeTy>;
friend struct ilist_traits<NodeTy>;
friend struct ilist_half_embedded_sentinel_traits<NodeTy>;
friend struct ilist_embedded_sentinel_traits<NodeTy>;
NodeTy *Next;
NodeTy *getNext() { return Next; }
const NodeTy *getNext() const { return Next; }
void setNext(NodeTy *N) { Next = N; }
protected:
ilist_node() : Next(nullptr) {}
public:
ilist_iterator<NodeTy> getIterator() {
// FIXME: Stop downcasting to create the iterator (potential UB).
return ilist_iterator<NodeTy>(static_cast<NodeTy *>(this));
}
ilist_iterator<const NodeTy> getIterator() const {
// FIXME: Stop downcasting to create the iterator (potential UB).
return ilist_iterator<const NodeTy>(static_cast<const NodeTy *>(this));
}
};
/// An ilist node that can access its parent list.
///
/// Requires \c NodeTy to have \a getParent() to find the parent node, and the
/// \c ParentTy to have \a getSublistAccess() to get a reference to the list.
template <typename NodeTy, typename ParentTy>
class ilist_node_with_parent : public ilist_node<NodeTy> {
protected:
ilist_node_with_parent() = default;
private:
/// Forward to NodeTy::getParent().
///
/// Note: do not use the name "getParent()". We want a compile error
/// (instead of recursion) when the subclass fails to implement \a
/// getParent().
const ParentTy *getNodeParent() const {
return static_cast<const NodeTy *>(this)->getParent();
}
public:
/// @name Adjacent Node Accessors
/// @{
/// \brief Get the previous node, or \c nullptr for the list head.
NodeTy *getPrevNode() {
// This should be factored into a reusable function, but then we couldn't use
// auto (and would need the type of the list).
const auto &List =
getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
return List.getPrevNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the previous node, or \c nullptr for the list head.
const NodeTy *getPrevNode() const {
return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
}
/// \brief Get the next node, or \c nullptr for the list tail.
NodeTy *getNextNode() {
// This should be factored into a reusable function, but then we couldn't use
// auto (and would need the type of the list).
const auto &List =
getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
return List.getNextNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the next node, or \c nullptr for the list tail.
const NodeTy *getNextNode() const {
return const_cast<ilist_node_with_parent *>(this)->getNextNode();
}
/// @}
};
} // End llvm namespace
#endif

View File

@ -1,261 +0,0 @@
//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ITERATOR_H
#define LLVM_ADT_ITERATOR_H
#include <cstddef>
#include <iterator>
namespace llvm {
/// \brief CRTP base class which implements the entire standard iterator facade
/// in terms of a minimal subset of the interface.
///
/// Use this when it is reasonable to implement most of the iterator
/// functionality in terms of a core subset. If you need special behavior or
/// there are performance implications for this, you may want to override the
/// relevant members instead.
///
/// Note that one abstraction this does *not* provide is implementing
/// subtraction in terms of addition by negating the difference. Negation isn't
/// always information preserving, and there are very reasonable iterator
/// designs where this doesn't work well. It doesn't really force much added
/// boilerplate anyway.
///
/// Another abstraction that this doesn't provide is implementing increment in
/// terms of addition of one. These aren't equivalent for all iterator
/// categories, and respecting that adds a lot of complexity for little gain.
template <typename DerivedT, typename IteratorCategoryT, typename T,
typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
typename ReferenceT = T &>
class iterator_facade_base
: public std::iterator<IteratorCategoryT, T, DifferenceTypeT, PointerT,
ReferenceT> {
protected:
enum {
IsRandomAccess =
std::is_base_of<std::random_access_iterator_tag, IteratorCategoryT>::value,
IsBidirectional =
std::is_base_of<std::bidirectional_iterator_tag, IteratorCategoryT>::value,
};
/// A proxy object for computing a reference via indirecting a copy of an
/// iterator. This is used in APIs which need to produce a reference via
/// indirection but for which the iterator object might be a temporary. The
/// proxy preserves the iterator internally and exposes the indirected
/// reference via a conversion operator.
class ReferenceProxy {
friend iterator_facade_base;
DerivedT I;
ReferenceProxy(DerivedT I) : I(std::move(I)) {}
public:
operator ReferenceT() const { return *I; }
};
public:
DerivedT operator+(DifferenceTypeT n) const {
static_assert(
IsRandomAccess,
"The '+' operator is only defined for random access iterators.");
DerivedT tmp = *static_cast<const DerivedT *>(this);
tmp += n;
return tmp;
}
friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
static_assert(
IsRandomAccess,
"The '+' operator is only defined for random access iterators.");
return i + n;
}
DerivedT operator-(DifferenceTypeT n) const {
static_assert(
IsRandomAccess,
"The '-' operator is only defined for random access iterators.");
DerivedT tmp = *static_cast<const DerivedT *>(this);
tmp -= n;
return tmp;
}
DerivedT &operator++() {
return static_cast<DerivedT *>(this)->operator+=(1);
}
DerivedT operator++(int) {
DerivedT tmp = *static_cast<DerivedT *>(this);
++*static_cast<DerivedT *>(this);
return tmp;
}
DerivedT &operator--() {
static_assert(
IsBidirectional,
"The decrement operator is only defined for bidirectional iterators.");
return static_cast<DerivedT *>(this)->operator-=(1);
}
DerivedT operator--(int) {
static_assert(
IsBidirectional,
"The decrement operator is only defined for bidirectional iterators.");
DerivedT tmp = *static_cast<DerivedT *>(this);
--*static_cast<DerivedT *>(this);
return tmp;
}
bool operator!=(const DerivedT &RHS) const {
return !static_cast<const DerivedT *>(this)->operator==(RHS);
}
bool operator>(const DerivedT &RHS) const {
static_assert(
IsRandomAccess,
"Relational operators are only defined for random access iterators.");
return !static_cast<const DerivedT *>(this)->operator<(RHS) &&
!static_cast<const DerivedT *>(this)->operator==(RHS);
}
bool operator<=(const DerivedT &RHS) const {
static_assert(
IsRandomAccess,
"Relational operators are only defined for random access iterators.");
return !static_cast<const DerivedT *>(this)->operator>(RHS);
}
bool operator>=(const DerivedT &RHS) const {
static_assert(
IsRandomAccess,
"Relational operators are only defined for random access iterators.");
return !static_cast<const DerivedT *>(this)->operator<(RHS);
}
PointerT operator->() const {
return &static_cast<const DerivedT *>(this)->operator*();
}
ReferenceProxy operator[](DifferenceTypeT n) const {
static_assert(IsRandomAccess,
"Subscripting is only defined for random access iterators.");
return ReferenceProxy(static_cast<const DerivedT *>(this)->operator+(n));
}
};
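// An illustrative sketch (hypothetical type, not part of this header): a
// forward iterator over an integer counter that supplies only the minimal
// core (dereference, equality, pre-increment); the facade derives operator!=
// and operator-> from it.
//
//   struct CountingIterator
//       : iterator_facade_base<CountingIterator, std::forward_iterator_tag,
//                              int, std::ptrdiff_t, const int *, const int &> {
//     int Value = 0;
//     CountingIterator() = default;
//     explicit CountingIterator(int V) : Value(V) {}
//     const int &operator*() const { return Value; }
//     CountingIterator &operator++() { ++Value; return *this; }
//     bool operator==(const CountingIterator &RHS) const {
//       return Value == RHS.Value;
//     }
//   };
//
// (Re-exposing the facade's post-increment requires a using-declaration, as
// iterator_adaptor_base does below.)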
/// \brief CRTP base class for adapting an iterator to a different type.
///
/// This class can be used through CRTP to adapt one iterator into another.
/// Typically this is done through providing in the derived class a custom \c
/// operator* implementation. Other methods can be overridden as well.
template <
typename DerivedT, typename WrappedIteratorT,
typename IteratorCategoryT =
typename std::iterator_traits<WrappedIteratorT>::iterator_category,
typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
typename DifferenceTypeT =
typename std::iterator_traits<WrappedIteratorT>::difference_type,
typename PointerT = typename std::conditional<
std::is_same<T, typename std::iterator_traits<
WrappedIteratorT>::value_type>::value,
typename std::iterator_traits<WrappedIteratorT>::pointer, T *>::type,
typename ReferenceT = typename std::conditional<
std::is_same<T, typename std::iterator_traits<
WrappedIteratorT>::value_type>::value,
typename std::iterator_traits<WrappedIteratorT>::reference, T &>::type,
// Don't provide these, they are mostly to act as aliases below.
typename WrappedTraitsT = std::iterator_traits<WrappedIteratorT>>
class iterator_adaptor_base
: public iterator_facade_base<DerivedT, IteratorCategoryT, T,
DifferenceTypeT, PointerT, ReferenceT> {
typedef typename iterator_adaptor_base::iterator_facade_base BaseT;
protected:
WrappedIteratorT I;
iterator_adaptor_base() = default;
explicit iterator_adaptor_base(WrappedIteratorT u) : I(std::move(u)) {}
const WrappedIteratorT &wrapped() const { return I; }
public:
typedef DifferenceTypeT difference_type;
DerivedT &operator+=(difference_type n) {
static_assert(
BaseT::IsRandomAccess,
"The '+=' operator is only defined for random access iterators.");
I += n;
return *static_cast<DerivedT *>(this);
}
DerivedT &operator-=(difference_type n) {
static_assert(
BaseT::IsRandomAccess,
"The '-=' operator is only defined for random access iterators.");
I -= n;
return *static_cast<DerivedT *>(this);
}
using BaseT::operator-;
difference_type operator-(const DerivedT &RHS) const {
static_assert(
BaseT::IsRandomAccess,
"The '-' operator is only defined for random access iterators.");
return I - RHS.I;
}
// We have to explicitly provide ++ and -- rather than letting the facade
// forward to += because WrappedIteratorT might not support +=.
using BaseT::operator++;
DerivedT &operator++() {
++I;
return *static_cast<DerivedT *>(this);
}
using BaseT::operator--;
DerivedT &operator--() {
static_assert(
BaseT::IsBidirectional,
"The decrement operator is only defined for bidirectional iterators.");
--I;
return *static_cast<DerivedT *>(this);
}
bool operator==(const DerivedT &RHS) const { return I == RHS.I; }
bool operator<(const DerivedT &RHS) const {
static_assert(
BaseT::IsRandomAccess,
"Relational operators are only defined for random access iterators.");
return I < RHS.I;
}
ReferenceT operator*() const { return *I; }
};
/// \brief An iterator type that allows iterating over the pointees via some
/// other iterator.
///
/// The typical usage of this is to expose a type that iterates over Ts, but
/// which is implemented with some iterator over T*s:
///
/// \code
/// typedef pointee_iterator<SmallVectorImpl<T *>::iterator> iterator;
/// \endcode
template <typename WrappedIteratorT,
typename T = typename std::remove_reference<
decltype(**std::declval<WrappedIteratorT>())>::type>
struct pointee_iterator
: iterator_adaptor_base<
pointee_iterator<WrappedIteratorT>, WrappedIteratorT,
typename std::iterator_traits<WrappedIteratorT>::iterator_category,
T> {
pointee_iterator() = default;
template <typename U>
pointee_iterator(U &&u)
: pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {}
T &operator*() const { return **this->I; }
};
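// Illustrative use (Foo, Ptrs, and consume are hypothetical; make_range comes
// from llvm/ADT/iterator_range.h):
//
//   SmallVector<Foo *, 8> Ptrs;
//   typedef pointee_iterator<SmallVectorImpl<Foo *>::iterator> iterator;
//   for (Foo &F : make_range(iterator(Ptrs.begin()), iterator(Ptrs.end())))
//     consume(F); // Visits each pointed-to Foo, not the pointers.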
}
#endif

View File

@ -1,68 +0,0 @@
//===- iterator_range.h - A range adaptor for iterators ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This provides a very simple, boring adaptor for a begin and end iterator
/// into a range type. This should be used to build range views that work well
/// with range based for loops and range based constructors.
///
/// Note that code here follows more standards-based coding conventions, as it
/// mirrors proposed interfaces for standardization.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ITERATOR_RANGE_H
#define LLVM_ADT_ITERATOR_RANGE_H
#include <utility>
#include <iterator>
namespace llvm {
/// \brief A range adaptor for a pair of iterators.
///
/// This just wraps two iterators into a range-compatible interface. Nothing
/// fancy at all.
template <typename IteratorT>
class iterator_range {
IteratorT begin_iterator, end_iterator;
public:
//TODO: Add SFINAE to test that the Container's iterators match the range's
// iterators.
template <typename Container>
iterator_range(Container &&c)
//TODO: Consider ADL/non-member begin/end calls.
: begin_iterator(c.begin()), end_iterator(c.end()) {}
iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
: begin_iterator(std::move(begin_iterator)),
end_iterator(std::move(end_iterator)) {}
IteratorT begin() const { return begin_iterator; }
IteratorT end() const { return end_iterator; }
};
/// \brief Convenience function for iterating over sub-ranges.
///
/// This provides a bit of syntactic sugar to make using sub-ranges
/// in for loops a bit easier. Analogous to std::make_pair().
template <class T> iterator_range<T> make_range(T x, T y) {
return iterator_range<T>(std::move(x), std::move(y));
}
template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
return iterator_range<T>(std::move(p.first), std::move(p.second));
}
template<typename T>
iterator_range<decltype(begin(std::declval<T>()))> drop_begin(T &&t, int n) {
return make_range(std::next(begin(t), n), end(t));
}
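// Illustrative use (hypothetical values): wrap a pair of iterators, or skip a
// prefix of a container, in a range-based for loop:
//
//   std::vector<int> V = {1, 2, 3, 4};
//   for (int I : make_range(V.begin(), V.end()))
//     use(I); // Visits 1, 2, 3, 4 (use() is a hypothetical consumer).
//   for (int I : drop_begin(V, 2))
//     use(I); // Visits 3 and 4.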
}
#endif

View File

@ -1,958 +0,0 @@
//===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the generic AliasAnalysis interface, which is the common
// interface used by all clients of alias analysis information and implemented
// by all alias analysis implementations. Mod/Ref information is also captured
// by this interface.
//
// Implementations of this interface need only implement the various virtual
// methods; those in turn provide the functionality for the entire suite of
// client APIs.
//
// This API identifies memory regions with the MemoryLocation class. The pointer
// component specifies the base memory address of the region. The Size specifies
// the maximum size (in address units) of the memory region, or
// MemoryLocation::UnknownSize if the size is not known. The TBAA tag
// identifies the "type" of the memory reference; see the
// TypeBasedAliasAnalysis class for details.
//
// Some non-obvious details include:
// - Pointers that point to two completely different objects in memory never
// alias, regardless of the value of the Size component.
// - NoAlias doesn't imply unequal pointers. The most obvious example of this
// is two pointers to constant memory. Even if they are equal, constant
// memory is never stored to, so there will never be any dependencies.
// In this and other situations, the pointers may be both NoAlias and
// MustAlias at the same time. The current API can only return one result,
// though this is rarely a problem in practice.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
#define LLVM_ANALYSIS_ALIASANALYSIS_H
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
namespace llvm {
class BasicAAResult;
class LoadInst;
class StoreInst;
class VAArgInst;
class DataLayout;
class Pass;
class AnalysisUsage;
class MemTransferInst;
class MemIntrinsic;
class DominatorTree;
class OrderedBasicBlock;
/// The possible results of an alias query.
///
/// These results are always computed between two MemoryLocation objects as
/// a query to some alias analysis.
///
/// Note that these are unscoped enumerations because we would like to support
/// implicitly testing a result for the existence of any possible aliasing with
/// a conversion to bool, but an "enum class" doesn't support this. The
/// canonical names from the literature are suffixed and unique anyway, and so
/// they serve as global constants in LLVM for these results.
///
/// See docs/AliasAnalysis.html for more information on the specific meanings
/// of these values.
enum AliasResult {
/// The two locations do not alias at all.
///
/// This value is arranged to convert to false, while all other values
/// convert to true. This allows a boolean context to convert the result to
/// a binary flag indicating whether there is the possibility of aliasing.
NoAlias = 0,
/// The two locations may or may not alias. This is the least precise result.
MayAlias,
/// The two locations alias, but only due to a partial overlap.
PartialAlias,
/// The two locations precisely alias each other.
MustAlias,
};
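// Because NoAlias is zero and every other result is non-zero, a query result
// converts directly to a "may these locations alias?" boolean (illustrative
// sketch; AA, LocA, and LocB are assumed to exist):
//
//   if (AA.alias(LocA, LocB)) {
//     // Conservatively treat the two locations as potentially aliasing.
//   }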
/// Flags indicating whether a memory access modifies or references memory.
///
/// This is no access at all, a modification, a reference, or both
/// a modification and a reference. These are specifically structured such that
/// they form a two bit matrix and bit-tests for 'mod' or 'ref' work with any
/// of the possible values.
enum ModRefInfo {
/// The access neither references nor modifies the value stored in memory.
MRI_NoModRef = 0,
/// The access references the value stored in memory.
MRI_Ref = 1,
/// The access modifies the value stored in memory.
MRI_Mod = 2,
/// The access both references and modifies the value stored in memory.
MRI_ModRef = MRI_Ref | MRI_Mod
};
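// Because the values form a two-bit matrix, mod/ref queries reduce to
// bit-tests (illustrative helpers, not part of this header):
//
//   bool mayModify(ModRefInfo MRI) { return MRI & MRI_Mod; }    // Mod, ModRef
//   bool mayReference(ModRefInfo MRI) { return MRI & MRI_Ref; } // Ref, ModRef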
/// The locations at which a function might access memory.
///
/// These are primarily used in conjunction with the \c AccessKind bits to
/// describe both the nature of access and the locations of access for a
/// function call.
enum FunctionModRefLocation {
/// Base case is no access to memory.
FMRL_Nowhere = 0,
/// Access to memory via argument pointers.
FMRL_ArgumentPointees = 4,
/// Access to any memory.
FMRL_Anywhere = 8 | FMRL_ArgumentPointees
};
/// Summary of how a function affects memory in the program.
///
/// Loads from constant globals are not considered memory accesses for this
/// interface. Also, functions may freely modify stack space local to their
/// invocation without having to report it through these interfaces.
enum FunctionModRefBehavior {
/// This function does not perform any non-local loads or stores to memory.
///
/// This property corresponds to the GCC 'const' attribute.
/// This property corresponds to the LLVM IR 'readnone' attribute.
/// This property corresponds to the IntrNoMem LLVM intrinsic flag.
FMRB_DoesNotAccessMemory = FMRL_Nowhere | MRI_NoModRef,
/// The only memory references in this function (if it has any) are
/// non-volatile loads from objects pointed to by its pointer-typed
/// arguments, with arbitrary offsets.
///
/// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
FMRB_OnlyReadsArgumentPointees = FMRL_ArgumentPointees | MRI_Ref,
/// The only memory references in this function (if it has any) are
/// non-volatile loads and stores from objects pointed to by its
/// pointer-typed arguments, with arbitrary offsets.
///
/// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
FMRB_OnlyAccessesArgumentPointees = FMRL_ArgumentPointees | MRI_ModRef,
/// This function does not perform any non-local stores or volatile loads,
/// but may read from any memory location.
///
/// This property corresponds to the GCC 'pure' attribute.
/// This property corresponds to the LLVM IR 'readonly' attribute.
/// This property corresponds to the IntrReadMem LLVM intrinsic flag.
FMRB_OnlyReadsMemory = FMRL_Anywhere | MRI_Ref,
// This function does not read from memory anywhere, but may write to any
// memory location.
//
// This property corresponds to the LLVM IR 'writeonly' attribute.
// This property corresponds to the IntrWriteMem LLVM intrinsic flag.
FMRB_DoesNotReadMemory = FMRL_Anywhere | MRI_Mod,
/// This indicates that the function could not be classified into one of the
/// behaviors above.
FMRB_UnknownModRefBehavior = FMRL_Anywhere | MRI_ModRef
};
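// Because each behavior embeds ModRefInfo and FunctionModRefLocation bits,
// classification queries reduce to masking (illustrative helper, not part of
// this header):
//
//   bool onlyReadsMemory(FunctionModRefBehavior MRB) {
//     return !(MRB & MRI_Mod); // True for FMRB_OnlyReadsMemory and stronger.
//   }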
class AAResults {
public:
// Make these results default constructible and movable. We have to spell
// these out because MSVC won't synthesize them.
AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}
AAResults(AAResults &&Arg);
~AAResults();
/// Register a specific AA result.
template <typename AAResultT> void addAAResult(AAResultT &AAResult) {
// FIXME: We should use a much lighter weight system than the usual
// polymorphic pattern because we don't own AAResult. It should
// ideally involve two pointers and no separate allocation.
AAs.emplace_back(new Model<AAResultT>(AAResult, *this));
}
//===--------------------------------------------------------------------===//
/// \name Alias Queries
/// @{
/// The main low level interface to the alias analysis implementation.
/// Returns an AliasResult indicating whether the two pointers are aliased to
/// each other. This is the interface that must be implemented by specific
/// alias analysis implementations.
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
/// A convenience wrapper around the primary \c alias interface.
AliasResult alias(const Value *V1, uint64_t V1Size, const Value *V2,
uint64_t V2Size) {
return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
/// A convenience wrapper around the primary \c alias interface.
AliasResult alias(const Value *V1, const Value *V2) {
return alias(V1, MemoryLocation::UnknownSize, V2,
MemoryLocation::UnknownSize);
}
/// A trivial helper function to check to see if the specified pointers are
/// no-alias.
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
return alias(LocA, LocB) == NoAlias;
}
/// A convenience wrapper around the \c isNoAlias helper interface.
bool isNoAlias(const Value *V1, uint64_t V1Size, const Value *V2,
uint64_t V2Size) {
return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
/// A convenience wrapper around the \c isNoAlias helper interface.
bool isNoAlias(const Value *V1, const Value *V2) {
return isNoAlias(MemoryLocation(V1), MemoryLocation(V2));
}
/// A trivial helper function to check to see if the specified pointers are
/// must-alias.
bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
return alias(LocA, LocB) == MustAlias;
}
/// A convenience wrapper around the \c isMustAlias helper interface.
bool isMustAlias(const Value *V1, const Value *V2) {
return alias(V1, 1, V2, 1) == MustAlias;
}
/// Checks whether the given location points to constant memory, or if
/// \p OrLocal is true whether it points to a local alloca.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false);
/// A convenience wrapper around the primary \c pointsToConstantMemory
/// interface.
bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
return pointsToConstantMemory(MemoryLocation(P), OrLocal);
}
/// @}
//===--------------------------------------------------------------------===//
/// \name Simple mod/ref information
/// @{
/// Get the ModRef info associated with a pointer argument of a callsite. The
/// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
/// that these bits do not necessarily account for the overall behavior of
/// the function, but rather only provide additional per-argument
/// information.
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
/// Return the behavior of the given call site.
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
/// Return the behavior when calling the given function.
FunctionModRefBehavior getModRefBehavior(const Function *F);
/// Checks if the specified call is known to never read or write memory.
///
/// Note that if the call only reads from known-constant memory, it is also
/// legal to return true. Also, calls that unwind the stack are legal for
/// this predicate.
///
/// Many optimizations (such as CSE and LICM) can be performed on such calls
/// without worrying about aliasing properties, and many calls have this
/// property (e.g. calls to 'sin' and 'cos').
///
/// This property corresponds to the GCC 'const' attribute.
bool doesNotAccessMemory(ImmutableCallSite CS) {
return getModRefBehavior(CS) == FMRB_DoesNotAccessMemory;
}
/// Checks if the specified function is known to never read or write memory.
///
/// Note that if the function only reads from known-constant memory, it is
/// also legal to return true. Also, functions that unwind the stack are
/// legal for this predicate.
///
/// Many optimizations (such as CSE and LICM) can be performed on calls to
/// such functions without worrying about aliasing properties, and many
/// functions have this property (e.g. 'sin' and 'cos').
///
/// This property corresponds to the GCC 'const' attribute.
bool doesNotAccessMemory(const Function *F) {
return getModRefBehavior(F) == FMRB_DoesNotAccessMemory;
}
/// Checks if the specified call is known to only read from non-volatile
/// memory (or not access memory at all).
///
/// Calls that unwind the stack are legal for this predicate.
///
/// This property allows many common optimizations to be performed in the
/// absence of interfering store instructions, such as CSE of strlen calls.
///
/// This property corresponds to the GCC 'pure' attribute.
bool onlyReadsMemory(ImmutableCallSite CS) {
return onlyReadsMemory(getModRefBehavior(CS));
}
/// Checks if the specified function is known to only read from non-volatile
/// memory (or not access memory at all).
///
/// Functions that unwind the stack are legal for this predicate.
///
/// This property allows many common optimizations to be performed in the
/// absence of interfering store instructions, such as CSE of strlen calls.
///
/// This property corresponds to the GCC 'pure' attribute.
bool onlyReadsMemory(const Function *F) {
return onlyReadsMemory(getModRefBehavior(F));
}
/// Checks if functions with the specified behavior are known to only read
/// from non-volatile memory (or not access memory at all).
static bool onlyReadsMemory(FunctionModRefBehavior MRB) {
return !(MRB & MRI_Mod);
}
/// Checks if functions with the specified behavior are known to only write
/// memory (or not access memory at all).
static bool doesNotReadMemory(FunctionModRefBehavior MRB) {
return !(MRB & MRI_Ref);
}
/// Checks if functions with the specified behavior are known to read and
/// write at most from objects pointed to by their pointer-typed arguments
/// (with arbitrary offsets).
static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) {
return !(MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);
}
/// Checks if functions with the specified behavior are known to potentially
/// read or write from objects pointed to by their pointer-typed arguments
/// (with arbitrary offsets).
static bool doesAccessArgPointees(FunctionModRefBehavior MRB) {
return (MRB & MRI_ModRef) && (MRB & FMRL_ArgumentPointees);
}
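// These static helpers are typically composed with getModRefBehavior; a
// small sketch (hypothetical client code):
//
//   FunctionModRefBehavior MRB = AA.getModRefBehavior(CS);
//   if (AAResults::onlyReadsMemory(MRB))
//     ; // no store in the callee can clobber memory
//   if (AAResults::onlyAccessesArgPointees(MRB) &&
//       AAResults::doesAccessArgPointees(MRB))
//     ; // walk the pointer arguments and query getArgModRefInfo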
/// getModRefInfo (for call sites) - Return information about whether
/// a particular call site modifies or reads the specified memory location.
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
/// getModRefInfo (for call sites) - A convenience wrapper.
ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
uint64_t Size) {
return getModRefInfo(CS, MemoryLocation(P, Size));
}
/// getModRefInfo (for calls) - Return information about whether
/// a particular call modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const CallInst *C, const MemoryLocation &Loc) {
return getModRefInfo(ImmutableCallSite(C), Loc);
}
/// getModRefInfo (for calls) - A convenience wrapper.
ModRefInfo getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
return getModRefInfo(C, MemoryLocation(P, Size));
}
/// getModRefInfo (for invokes) - Return information about whether
/// a particular invoke modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) {
return getModRefInfo(ImmutableCallSite(I), Loc);
}
/// getModRefInfo (for invokes) - A convenience wrapper.
ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P, uint64_t Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// getModRefInfo (for loads) - Return information about whether
/// a particular load modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
/// getModRefInfo (for loads) - A convenience wrapper.
ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
return getModRefInfo(L, MemoryLocation(P, Size));
}
/// getModRefInfo (for stores) - Return information about whether
/// a particular store modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
/// getModRefInfo (for stores) - A convenience wrapper.
ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size) {
return getModRefInfo(S, MemoryLocation(P, Size));
}
/// getModRefInfo (for fences) - Return information about whether
/// a particular fence modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
// Conservatively correct. (We could possibly be a bit smarter if
// Loc is an alloca that doesn't escape.)
return MRI_ModRef;
}
/// getModRefInfo (for fences) - A convenience wrapper.
ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, uint64_t Size) {
return getModRefInfo(S, MemoryLocation(P, Size));
}
/// getModRefInfo (for cmpxchges) - Return information about whether
/// a particular cmpxchg modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc);
/// getModRefInfo (for cmpxchges) - A convenience wrapper.
ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
unsigned Size) {
return getModRefInfo(CX, MemoryLocation(P, Size));
}
/// getModRefInfo (for atomicrmws) - Return information about whether
/// a particular atomicrmw modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc);
/// getModRefInfo (for atomicrmws) - A convenience wrapper.
ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,
unsigned Size) {
return getModRefInfo(RMW, MemoryLocation(P, Size));
}
/// getModRefInfo (for va_args) - Return information about whether
/// a particular va_arg modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
/// getModRefInfo (for va_args) - A convenience wrapper.
ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, uint64_t Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// getModRefInfo (for catchpads) - Return information about whether
/// a particular catchpad modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc);
/// getModRefInfo (for catchpads) - A convenience wrapper.
ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
uint64_t Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// getModRefInfo (for catchrets) - Return information about whether
/// a particular catchret modifies or reads the specified memory location.
ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc);
/// getModRefInfo (for catchrets) - A convenience wrapper.
ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
uint64_t Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// Check whether or not an instruction may read or write memory (without
/// regard to a specific location).
///
/// For function calls, this delegates to the alias-analysis specific
/// call-site mod-ref behavior queries. Otherwise it delegates to the generic
/// mod ref information query without a location.
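///
/// A minimal sketch of a typical use (hypothetical client code):
/// \code
///   if (AA.getModRefInfo(&I) == MRI_NoModRef)
///     ; // I neither reads nor writes memory, e.g. a hoisting candidate
/// \endcode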
ModRefInfo getModRefInfo(const Instruction *I) {
if (auto CS = ImmutableCallSite(I)) {
auto MRB = getModRefBehavior(CS);
if ((MRB & MRI_ModRef) == MRI_ModRef)
return MRI_ModRef;
if (MRB & MRI_Ref)
return MRI_Ref;
if (MRB & MRI_Mod)
return MRI_Mod;
return MRI_NoModRef;
}
return getModRefInfo(I, MemoryLocation());
}
/// Check whether or not an instruction may read or write the specified
/// memory location.
///
/// An instruction that doesn't read or write memory may be trivially LICM'd
/// for example.
///
/// This primarily delegates to specific helpers above.
ModRefInfo getModRefInfo(const Instruction *I, const MemoryLocation &Loc) {
switch (I->getOpcode()) {
case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, Loc);
case Instruction::Load: return getModRefInfo((const LoadInst*)I, Loc);
case Instruction::Store: return getModRefInfo((const StoreInst*)I, Loc);
case Instruction::Fence: return getModRefInfo((const FenceInst*)I, Loc);
case Instruction::AtomicCmpXchg:
return getModRefInfo((const AtomicCmpXchgInst*)I, Loc);
case Instruction::AtomicRMW:
return getModRefInfo((const AtomicRMWInst*)I, Loc);
case Instruction::Call: return getModRefInfo((const CallInst*)I, Loc);
case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,Loc);
case Instruction::CatchPad:
return getModRefInfo((const CatchPadInst *)I, Loc);
case Instruction::CatchRet:
return getModRefInfo((const CatchReturnInst *)I, Loc);
default:
return MRI_NoModRef;
}
}
/// A convenience wrapper for constructing the memory location.
ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
uint64_t Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
/// Return information about whether a call and an instruction may refer to
/// the same memory locations.
ModRefInfo getModRefInfo(Instruction *I, ImmutableCallSite Call);
/// Return information about whether two call sites may refer to the same set
/// of memory locations. See the AA documentation for details:
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction ordering queries inside the BasicBlock containing \p I.
ModRefInfo callCapturesBefore(const Instruction *I,
const MemoryLocation &MemLoc, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr);
/// \brief A convenience wrapper to synthesize a memory location.
ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
uint64_t Size, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr) {
return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB);
}
/// @}
//===--------------------------------------------------------------------===//
/// \name Higher level methods for querying mod/ref information.
/// @{
/// Check if it is possible for execution of the specified basic block to
/// modify the location Loc.
bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
/// A convenience wrapper synthesizing a memory location.
bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
uint64_t Size) {
return canBasicBlockModify(BB, MemoryLocation(P, Size));
}
/// Check if it is possible for the execution of the specified instructions
/// to mod/ref (according to the mode) the location Loc.
///
/// The instructions to consider are all of the instructions in the range of
/// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
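///
/// For example, to ask whether any instruction in the inclusive range writes
/// \p Loc (a sketch; First and Last are hypothetical):
/// \code
///   bool MayWrite = AA.canInstructionRangeModRef(First, Last, Loc, MRI_Mod);
/// \endcode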
bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
const MemoryLocation &Loc,
const ModRefInfo Mode);
/// A convenience wrapper synthesizing a memory location.
bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
const Value *Ptr, uint64_t Size,
const ModRefInfo Mode) {
return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
}
private:
class Concept;
template <typename T> class Model;
template <typename T> friend class AAResultBase;
const TargetLibraryInfo &TLI;
std::vector<std::unique_ptr<Concept>> AAs;
};
/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
/// pointer or reference.
typedef AAResults AliasAnalysis;
/// A private abstract base class describing the concept of an individual alias
/// analysis implementation.
///
/// This interface is implemented by any \c Model instantiation. It is also the
/// interface which a type used to instantiate the model must provide.
///
/// All of these methods model the methods of the same name in the \c
/// AAResults class. Only differences and specifics of how the
/// implementations are called are documented here.
class AAResults::Concept {
public:
virtual ~Concept() = 0;
/// An update API used internally by the AAResults to provide
/// a handle back to the top level aggregation.
virtual void setAAResults(AAResults *NewAAR) = 0;
//===--------------------------------------------------------------------===//
/// \name Alias Queries
/// @{
/// The main low level interface to the alias analysis implementation.
/// Returns an AliasResult indicating whether the two pointers are aliased to
/// each other. This is the interface that must be implemented by specific
/// alias analysis implementations.
virtual AliasResult alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) = 0;
/// Checks whether the given location points to constant memory, or if
/// \p OrLocal is true whether it points to a local alloca.
virtual bool pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) = 0;
/// @}
//===--------------------------------------------------------------------===//
/// \name Simple mod/ref information
/// @{
/// Get the ModRef info associated with a pointer argument of a callsite. The
/// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
/// that these bits do not necessarily account for the overall behavior of
/// the function, but rather only provide additional per-argument
/// information.
virtual ModRefInfo getArgModRefInfo(ImmutableCallSite CS,
unsigned ArgIdx) = 0;
/// Return the behavior of the given call site.
virtual FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) = 0;
/// Return the behavior when calling the given function.
virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
/// getModRefInfo (for call sites) - Return information about whether
/// a particular call site modifies or reads the specified memory location.
virtual ModRefInfo getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) = 0;
/// Return information about whether two call sites may refer to the same set
/// of memory locations. See the AA documentation for details:
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
virtual ModRefInfo getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) = 0;
/// @}
};
/// A private class template which derives from \c Concept and wraps some other
/// type.
///
/// This models the concept by directly forwarding each interface point to the
/// wrapped type which must implement a compatible interface. This provides
/// a type erased binding.
template <typename AAResultT> class AAResults::Model final : public Concept {
AAResultT &Result;
public:
explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) {
Result.setAAResults(&AAR);
}
~Model() override {}
void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); }
AliasResult alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) override {
return Result.alias(LocA, LocB);
}
bool pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) override {
return Result.pointsToConstantMemory(Loc, OrLocal);
}
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override {
return Result.getArgModRefInfo(CS, ArgIdx);
}
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
return Result.getModRefBehavior(CS);
}
FunctionModRefBehavior getModRefBehavior(const Function *F) override {
return Result.getModRefBehavior(F);
}
ModRefInfo getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) override {
return Result.getModRefInfo(CS, Loc);
}
ModRefInfo getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
return Result.getModRefInfo(CS1, CS2);
}
};
/// A CRTP-driven "mixin" base class to help implement the function alias
/// analysis results concept.
///
/// Because of the nature of many alias analysis implementations, they often
/// only implement a subset of the interface. This base class will attempt to
/// implement the remaining portions of the interface in terms of simpler forms
/// of the interface where possible, and otherwise provide conservatively
/// correct fallback implementations.
///
/// Implementors of an alias analysis should derive from this CRTP, and then
/// override specific methods that they wish to customize. There is no need to
/// use virtual anywhere, the CRTP base class does static dispatch to the
/// derived type passed into it.
template <typename DerivedT> class AAResultBase {
// Expose some parts of the interface only to the AAResults::Model
// for wrapping. Specifically, this allows the model to call our
// setAAResults method without exposing it as a fully public API.
friend class AAResults::Model<DerivedT>;
/// A pointer to the AAResults object that this AAResult is
/// aggregated within. May be null if not aggregated.
AAResults *AAR;
/// Helper to dispatch calls back through the derived type.
DerivedT &derived() { return static_cast<DerivedT &>(*this); }
/// A setter for the AAResults pointer, which is used to satisfy the
/// AAResults::Model contract.
void setAAResults(AAResults *NewAAR) { AAR = NewAAR; }
protected:
/// This proxy class models a common pattern where we delegate to either the
/// top-level \c AAResults aggregation if one is registered, or to the
/// current result if none are registered.
class AAResultsProxy {
AAResults *AAR;
DerivedT &CurrentResult;
public:
AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult)
: AAR(AAR), CurrentResult(CurrentResult) {}
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
return AAR ? AAR->alias(LocA, LocB) : CurrentResult.alias(LocA, LocB);
}
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
return AAR ? AAR->pointsToConstantMemory(Loc, OrLocal)
: CurrentResult.pointsToConstantMemory(Loc, OrLocal);
}
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
return AAR ? AAR->getArgModRefInfo(CS, ArgIdx) : CurrentResult.getArgModRefInfo(CS, ArgIdx);
}
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
return AAR ? AAR->getModRefBehavior(CS) : CurrentResult.getModRefBehavior(CS);
}
FunctionModRefBehavior getModRefBehavior(const Function *F) {
return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F);
}
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
return AAR ? AAR->getModRefInfo(CS, Loc)
: CurrentResult.getModRefInfo(CS, Loc);
}
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
return AAR ? AAR->getModRefInfo(CS1, CS2) : CurrentResult.getModRefInfo(CS1, CS2);
}
};
explicit AAResultBase() {}
// Provide all the copy and move constructors so that derived types aren't
// constrained.
AAResultBase(const AAResultBase &Arg) {}
AAResultBase(AAResultBase &&Arg) {}
/// Get a proxy for the best AA result set to query at this time.
///
/// When this result is part of a larger aggregation, this will proxy to that
/// aggregation. When this result is used in isolation, it will just delegate
/// back to the derived class's implementation.
///
/// Note that callers of this need to take considerable care not to cause
/// performance problems when they use this routine; in the case of a large
/// number of alias analyses being aggregated, it can be expensive to walk
/// back across the chain.
AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); }
public:
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
return MayAlias;
}
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
return false;
}
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
return MRI_ModRef;
}
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
return FMRB_UnknownModRefBehavior;
}
FunctionModRefBehavior getModRefBehavior(const Function *F) {
return FMRB_UnknownModRefBehavior;
}
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
return MRI_ModRef;
}
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
return MRI_ModRef;
}
};
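// A minimal sketch of a result built on this mixin (hypothetical,
// illustrative only): override just the queries you can answer more
// precisely and fall back to the conservative base implementation.
//
//   class TrivialAAResult : public AAResultBase<TrivialAAResult> {
//     friend AAResultBase<TrivialAAResult>;
//   public:
//     AliasResult alias(const MemoryLocation &LocA,
//                       const MemoryLocation &LocB) {
//       if (LocA.Ptr == LocB.Ptr)
//         return MustAlias;
//       return AAResultBase::alias(LocA, LocB); // conservative MayAlias
//     }
//   };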
/// Return true if this pointer is returned by a noalias function.
bool isNoAliasCall(const Value *V);
/// Return true if this is an argument with the noalias attribute.
bool isNoAliasArgument(const Value *V);
/// Return true if this pointer refers to a distinct and identifiable object.
/// This returns true for:
/// Global Variables and Functions (but not Global Aliases)
/// Allocas
/// ByVal and NoAlias Arguments
/// NoAlias returns (e.g. calls to malloc)
///
bool isIdentifiedObject(const Value *V);
/// Return true if V is unambiguously identified at the function level.
/// Different IdentifiedFunctionLocals can't alias.
/// Further, an IdentifiedFunctionLocal cannot alias with any function
/// arguments other than itself, which is not necessarily true for
/// IdentifiedObjects.
bool isIdentifiedFunctionLocal(const Value *V);
/// A manager for alias analyses.
///
/// This class can have analyses registered with it and, when run, it will run
/// all of them and aggregate their results into a single AA results interface
/// that dispatches across all of the alias analysis results available.
///
/// Note that the order in which analyses are registered is very significant.
/// That is the order in which the results will be aggregated and queried.
///
/// This manager effectively wraps the AnalysisManager for registering alias
/// analyses. When you register your alias analysis with this manager, it will
/// ensure the analysis itself is registered with its AnalysisManager.
class AAManager : public AnalysisInfoMixin<AAManager> {
public:
typedef AAResults Result;
// This type has value semantics. We have to spell these out because MSVC
// won't synthesize them.
AAManager() {}
AAManager(AAManager &&Arg) : ResultGetters(std::move(Arg.ResultGetters)) {}
AAManager(const AAManager &Arg) : ResultGetters(Arg.ResultGetters) {}
AAManager &operator=(AAManager &&RHS) {
ResultGetters = std::move(RHS.ResultGetters);
return *this;
}
AAManager &operator=(const AAManager &RHS) {
ResultGetters = RHS.ResultGetters;
return *this;
}
/// Register a specific AA result.
template <typename AnalysisT> void registerFunctionAnalysis() {
ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>);
}
/// Register a specific AA result.
template <typename AnalysisT> void registerModuleAnalysis() {
ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
}
Result run(Function &F, AnalysisManager<Function> &AM) {
Result R(AM.getResult<TargetLibraryAnalysis>(F));
for (auto &Getter : ResultGetters)
(*Getter)(F, AM, R);
return R;
}
private:
friend AnalysisInfoMixin<AAManager>;
static char PassID;
SmallVector<void (*)(Function &F, AnalysisManager<Function> &AM,
AAResults &AAResults),
4> ResultGetters;
template <typename AnalysisT>
static void getFunctionAAResultImpl(Function &F,
AnalysisManager<Function> &AM,
AAResults &AAResults) {
AAResults.addAAResult(AM.template getResult<AnalysisT>(F));
}
template <typename AnalysisT>
static void getModuleAAResultImpl(Function &F, AnalysisManager<Function> &AM,
AAResults &AAResults) {
auto &MAM =
AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
if (auto *R = MAM.template getCachedResult<AnalysisT>(*F.getParent()))
AAResults.addAAResult(*R);
}
};
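// Wiring this into the new pass manager might look like the following
// sketch (hypothetical setup code):
//
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] {
//     AAManager AA;
//     AA.registerFunctionAnalysis<BasicAA>();
//     return AA;
//   });
//   // FAM.getResult<AAManager>(F) now aggregates the registered analyses.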
/// A wrapper pass to provide the legacy pass manager access to a suitably
/// prepared AAResults object.
class AAResultsWrapperPass : public FunctionPass {
std::unique_ptr<AAResults> AAR;
public:
static char ID;
AAResultsWrapperPass();
AAResults &getAAResults() { return *AAR; }
const AAResults &getAAResults() const { return *AAR; }
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
FunctionPass *createAAResultsWrapperPass();
/// A wrapper pass around a callback which can be used to populate the
/// AAResults in the AAResultsWrapperPass from an external AA.
///
/// The callback provided here will be used each time we prepare an AAResults
/// object, and will receive a reference to the function wrapper pass, the
/// function, and the AAResults object to populate. This should be used when
/// setting up a custom pass pipeline to inject a hook into the AA results.
ImmutablePass *createExternalAAWrapperPass(
std::function<void(Pass &, Function &, AAResults &)> Callback);
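// For example, to splice a custom result into every AAResults the legacy
// pipeline builds (a sketch; getMyResultFor is hypothetical):
//
//   PM.add(createExternalAAWrapperPass(
//       [](Pass &P, Function &F, AAResults &AAR) {
//         // The referenced result must outlive AAR.
//         AAR.addAAResult(getMyResultFor(F));
//       }));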
/// A helper for the legacy pass manager to create a \c AAResults
/// object populated to the best of our ability for a particular function when
/// inside of a \c ModulePass or a \c CallGraphSCCPass.
///
/// If a \c ModulePass or a \c CallGraphSCCPass calls \p
/// createLegacyPMAAResults, it also needs to call \p addUsedAAAnalyses in \p
/// getAnalysisUsage.
AAResults createLegacyPMAAResults(Pass &P, Function &F, BasicAAResult &BAR);
/// A helper for the legacy pass manager to populate \p AU to add uses to make
/// sure the analyses required by \p createLegacyPMAAResults are available.
void getAAResultsAnalysisUsage(AnalysisUsage &AU);
} // End llvm namespace
#endif

View File

@ -1,70 +0,0 @@
//===- AliasAnalysisEvaluator.h - Alias Analysis Accuracy Evaluator -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements a simple N^2 alias analysis accuracy evaluator. The
/// analysis result is a set of statistics of how many times the AA
/// infrastructure provides each kind of alias result and mod/ref result when
/// queried with all pairs of pointers in the function.
///
/// It can be used to evaluate a change in an alias analysis implementation,
/// algorithm, or the AA pipeline infrastructure itself. It acts like a stable
/// and easily tested consumer of all AA information exposed.
///
/// This is inspired and adapted from code by: Naveen Neelakantam, Francesco
/// Spadini, and Wojciech Stryjewski.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
#define LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
class AAResults;
class AAEvaluator : public PassInfoMixin<AAEvaluator> {
int64_t FunctionCount;
int64_t NoAliasCount, MayAliasCount, PartialAliasCount, MustAliasCount;
int64_t NoModRefCount, ModCount, RefCount, ModRefCount;
public:
AAEvaluator()
: FunctionCount(), NoAliasCount(), MayAliasCount(), PartialAliasCount(),
MustAliasCount(), NoModRefCount(), ModCount(), RefCount(),
ModRefCount() {}
AAEvaluator(AAEvaluator &&Arg)
: FunctionCount(Arg.FunctionCount), NoAliasCount(Arg.NoAliasCount),
MayAliasCount(Arg.MayAliasCount),
PartialAliasCount(Arg.PartialAliasCount),
MustAliasCount(Arg.MustAliasCount), NoModRefCount(Arg.NoModRefCount),
ModCount(Arg.ModCount), RefCount(Arg.RefCount),
ModRefCount(Arg.ModRefCount) {
Arg.FunctionCount = 0;
}
~AAEvaluator();
/// \brief Run the pass over the function.
PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
private:
// Allow the legacy pass to run this using an internal API.
friend class AAEvalLegacyPass;
void runInternal(Function &F, AAResults &AA);
};
/// Create a wrapper of the above for the legacy pass manager.
FunctionPass *createAAEvalPass();
}
#endif

View File

@ -1,447 +0,0 @@
//===- llvm/Analysis/AliasSetTracker.h - Build Alias Sets -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines two classes: AliasSetTracker and AliasSet. These interfaces
// are used to classify a collection of pointer references into a maximal number
// of disjoint sets. Each AliasSet object constructed by the AliasSetTracker
// object refers to memory disjoint from the other sets.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H
#define LLVM_ANALYSIS_ALIASSETTRACKER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ValueHandle.h"
#include <vector>
namespace llvm {
class LoadInst;
class StoreInst;
class VAArgInst;
class MemSetInst;
class AliasSetTracker;
class AliasSet;
class AliasSet : public ilist_node<AliasSet> {
friend class AliasSetTracker;
class PointerRec {
Value *Val; // The pointer this record corresponds to.
PointerRec **PrevInList, *NextInList;
AliasSet *AS;
uint64_t Size;
AAMDNodes AAInfo;
public:
PointerRec(Value *V)
: Val(V), PrevInList(nullptr), NextInList(nullptr), AS(nullptr), Size(0),
AAInfo(DenseMapInfo<AAMDNodes>::getEmptyKey()) {}
Value *getValue() const { return Val; }
PointerRec *getNext() const { return NextInList; }
bool hasAliasSet() const { return AS != nullptr; }
PointerRec** setPrevInList(PointerRec **PIL) {
PrevInList = PIL;
return &NextInList;
}
bool updateSizeAndAAInfo(uint64_t NewSize, const AAMDNodes &NewAAInfo) {
bool SizeChanged = false;
if (NewSize > Size) {
Size = NewSize;
SizeChanged = true;
}
if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey())
// We don't have an AAInfo yet. Set it to NewAAInfo.
AAInfo = NewAAInfo;
else if (AAInfo != NewAAInfo)
// NewAAInfo conflicts with AAInfo.
AAInfo = DenseMapInfo<AAMDNodes>::getTombstoneKey();
return SizeChanged;
}
uint64_t getSize() const { return Size; }
/// Return the AAInfo, or null if there is no information or conflicting
/// information.
AAMDNodes getAAInfo() const {
// If we have missing or conflicting AAInfo, return null.
if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey() ||
AAInfo == DenseMapInfo<AAMDNodes>::getTombstoneKey())
return AAMDNodes();
return AAInfo;
}
AliasSet *getAliasSet(AliasSetTracker &AST) {
assert(AS && "No AliasSet yet!");
if (AS->Forward) {
AliasSet *OldAS = AS;
AS = OldAS->getForwardedTarget(AST);
AS->addRef();
OldAS->dropRef(AST);
}
return AS;
}
void setAliasSet(AliasSet *as) {
assert(!AS && "Already have an alias set!");
AS = as;
}
void eraseFromList() {
if (NextInList) NextInList->PrevInList = PrevInList;
*PrevInList = NextInList;
if (AS->PtrListEnd == &NextInList) {
AS->PtrListEnd = PrevInList;
assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
}
delete this;
}
};
PointerRec *PtrList, **PtrListEnd; // Doubly linked list of nodes.
AliasSet *Forward; // Forwarding pointer.
/// All instructions without a specific address in this alias set.
std::vector<AssertingVH<Instruction> > UnknownInsts;
/// Number of nodes pointing to this AliasSet plus the number of AliasSets
/// forwarding to it.
unsigned RefCount : 28;
/// The kinds of access this alias set models.
///
/// We keep track of whether this alias set merely refers to the locations of
/// memory (and not any particular access), whether it modifies or references
/// the memory, or whether it does both. The lattice goes from "NoAccess" to
/// either RefAccess or ModAccess, then to ModRefAccess as necessary.
enum AccessLattice {
NoAccess = 0,
RefAccess = 1,
ModAccess = 2,
ModRefAccess = RefAccess | ModAccess
};
unsigned Access : 2;
/// The kind of alias relationship between pointers of the set.
///
/// These represent conservatively correct alias results between any members
/// of the set. We represent these independently of the values of alias
/// results in order to pack them into a single bit. The lattice goes from
/// MustAlias to MayAlias.
enum AliasLattice {
SetMustAlias = 0, SetMayAlias = 1
};
unsigned Alias : 1;
/// True if this alias set contains volatile loads or stores.
unsigned Volatile : 1;
void addRef() { ++RefCount; }
void dropRef(AliasSetTracker &AST) {
assert(RefCount >= 1 && "Invalid reference count detected!");
if (--RefCount == 0)
removeFromTracker(AST);
}
Instruction *getUnknownInst(unsigned i) const {
assert(i < UnknownInsts.size());
return UnknownInsts[i];
}
public:
/// Accessors...
bool isRef() const { return Access & RefAccess; }
bool isMod() const { return Access & ModAccess; }
bool isMustAlias() const { return Alias == SetMustAlias; }
bool isMayAlias() const { return Alias == SetMayAlias; }
/// Return true if this alias set contains volatile loads or stores.
bool isVolatile() const { return Volatile; }
/// Return true if this alias set should be ignored as part of the
/// AliasSetTracker object.
bool isForwardingAliasSet() const { return Forward; }
/// Merge the specified alias set into this alias set.
void mergeSetIn(AliasSet &AS, AliasSetTracker &AST);
// Alias Set iteration - Allow access to all of the pointers which are part of
// this alias set.
class iterator;
iterator begin() const { return iterator(PtrList); }
iterator end() const { return iterator(); }
bool empty() const { return PtrList == nullptr; }
void print(raw_ostream &OS) const;
void dump() const;
/// Define an iterator for alias sets... this is just a forward iterator.
class iterator : public std::iterator<std::forward_iterator_tag,
PointerRec, ptrdiff_t> {
PointerRec *CurNode;
public:
explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}
bool operator==(const iterator& x) const {
return CurNode == x.CurNode;
}
bool operator!=(const iterator& x) const { return !operator==(x); }
value_type &operator*() const {
assert(CurNode && "Dereferencing AliasSet.end()!");
return *CurNode;
}
value_type *operator->() const { return &operator*(); }
Value *getPointer() const { return CurNode->getValue(); }
uint64_t getSize() const { return CurNode->getSize(); }
AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }
iterator& operator++() { // Preincrement
assert(CurNode && "Advancing past AliasSet.end()!");
CurNode = CurNode->getNext();
return *this;
}
iterator operator++(int) { // Postincrement
iterator tmp = *this; ++*this; return tmp;
}
};
private:
// Can only be created by AliasSetTracker. Also, ilist creates one
// to serve as a sentinel.
friend struct ilist_sentinel_traits<AliasSet>;
AliasSet()
: PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
Access(NoAccess), Alias(SetMustAlias), Volatile(false) {
}
AliasSet(const AliasSet &AS) = delete;
void operator=(const AliasSet &AS) = delete;
PointerRec *getSomePointer() const {
return PtrList;
}
/// Return the real alias set this represents. If this has been merged with
/// another set and is forwarding, return the ultimate destination set. This
/// also implements the union-find collapsing as well.
AliasSet *getForwardedTarget(AliasSetTracker &AST) {
if (!Forward) return this;
AliasSet *Dest = Forward->getForwardedTarget(AST);
if (Dest != Forward) {
Dest->addRef();
Forward->dropRef(AST);
Forward = Dest;
}
return Dest;
}
void removeFromTracker(AliasSetTracker &AST);
void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
const AAMDNodes &AAInfo,
bool KnownMustAlias = false);
void addUnknownInst(Instruction *I, AliasAnalysis &AA);
void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
bool WasEmpty = UnknownInsts.empty();
for (size_t i = 0, e = UnknownInsts.size(); i != e; ++i)
if (UnknownInsts[i] == I) {
UnknownInsts[i] = UnknownInsts.back();
UnknownInsts.pop_back();
--i; --e; // Revisit the moved entry.
}
if (!WasEmpty && UnknownInsts.empty())
dropRef(AST);
}
void setVolatile() { Volatile = true; }
public:
/// Return true if the specified pointer "may" (or must) alias one of the
/// members in the set.
bool aliasesPointer(const Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo,
AliasAnalysis &AA) const;
bool aliasesUnknownInst(const Instruction *Inst, AliasAnalysis &AA) const;
};
inline raw_ostream& operator<<(raw_ostream &OS, const AliasSet &AS) {
AS.print(OS);
return OS;
}
class AliasSetTracker {
/// A CallbackVH to arrange for AliasSetTracker to be notified whenever a
/// Value is deleted.
class ASTCallbackVH final : public CallbackVH {
AliasSetTracker *AST;
void deleted() override;
void allUsesReplacedWith(Value *) override;
public:
ASTCallbackVH(Value *V, AliasSetTracker *AST = nullptr);
ASTCallbackVH &operator=(Value *V);
};
/// Traits to tell DenseMap how to compare and hash the value handle.
struct ASTCallbackVHDenseMapInfo : public DenseMapInfo<Value *> {};
AliasAnalysis &AA;
ilist<AliasSet> AliasSets;
typedef DenseMap<ASTCallbackVH, AliasSet::PointerRec*,
ASTCallbackVHDenseMapInfo>
PointerMapType;
// Map from pointers to their node
PointerMapType PointerMap;
public:
/// Create an empty collection of AliasSets, and use the specified alias
/// analysis object to disambiguate load and store addresses.
explicit AliasSetTracker(AliasAnalysis &aa) : AA(aa) {}
~AliasSetTracker() { clear(); }
/// These methods are used to add different types of instructions to the alias
/// sets. Adding a new instruction can result in one of three actions
/// happening:
///
/// 1. If the instruction doesn't alias any other sets, create a new set.
/// 2. If the instruction aliases exactly one set, add it to the set.
/// 3. If the instruction aliases multiple sets, merge the sets, and add
/// the instruction to the result.
///
/// These methods return true if inserting the instruction resulted in the
/// addition of a new alias set (i.e., the pointer did not alias anything).
///
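/// A typical construction loop (a sketch of hypothetical client code):
/// \code
///   AliasSetTracker AST(AA);
///   for (BasicBlock &BB : F)
///     AST.add(BB);
///   for (const AliasSet &AS : AST)
///     AS.print(errs());
/// \endcode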
bool add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
bool add(LoadInst *LI);
bool add(StoreInst *SI);
bool add(VAArgInst *VAAI);
bool add(MemSetInst *MSI);
bool add(Instruction *I); // Dispatch to one of the other add methods...
void add(BasicBlock &BB); // Add all instructions in basic block
void add(const AliasSetTracker &AST); // Add alias relations from another AST
bool addUnknown(Instruction *I);
/// These methods are used to remove all entries that might be aliased by the
/// specified instruction. These methods return true if any alias sets were
/// eliminated.
bool remove(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo);
bool remove(LoadInst *LI);
bool remove(StoreInst *SI);
bool remove(VAArgInst *VAAI);
bool remove(MemSetInst *MSI);
bool remove(Instruction *I);
void remove(AliasSet &AS);
bool removeUnknown(Instruction *I);
void clear();
/// Return the alias sets that are active.
const ilist<AliasSet> &getAliasSets() const { return AliasSets; }
/// Return the alias set that the specified pointer lives in. If the New
/// argument is non-null, this method sets the value to true if a new alias
/// set is created to contain the pointer (because the pointer didn't alias
/// anything).
AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
const AAMDNodes &AAInfo,
bool *New = nullptr);
/// Return the alias set containing the location specified if one exists,
/// otherwise return null.
AliasSet *getAliasSetForPointerIfExists(const Value *P, uint64_t Size,
const AAMDNodes &AAInfo) {
return mergeAliasSetsForPointer(P, Size, AAInfo);
}
/// Return true if the specified location is represented by this alias set,
/// false otherwise. This does not modify the AST object or alias sets.
bool containsPointer(const Value *P, uint64_t Size,
const AAMDNodes &AAInfo) const;
/// Return true if the specified instruction "may" (or must) alias one of the
/// members in any of the sets.
bool containsUnknown(const Instruction *I) const;
/// Return the underlying alias analysis object used by this tracker.
AliasAnalysis &getAliasAnalysis() const { return AA; }
/// This method is used to remove a pointer value from the AliasSetTracker
/// entirely. It should be used when an instruction is deleted from the
/// program to update the AST. If you don't use this, you would have dangling
/// pointers to deleted instructions.
void deleteValue(Value *PtrVal);
/// This method should be used whenever a preexisting value in the program is
/// copied or cloned, introducing a new value. Note that it is ok for clients
/// that use this method to introduce the same value multiple times: if the
/// tracker already knows about a value, it will ignore the request.
void copyValue(Value *From, Value *To);
typedef ilist<AliasSet>::iterator iterator;
typedef ilist<AliasSet>::const_iterator const_iterator;
const_iterator begin() const { return AliasSets.begin(); }
const_iterator end() const { return AliasSets.end(); }
iterator begin() { return AliasSets.begin(); }
iterator end() { return AliasSets.end(); }
void print(raw_ostream &OS) const;
void dump() const;
private:
friend class AliasSet;
void removeAliasSet(AliasSet *AS);
/// Just like operator[] on the map, except that it creates an entry for the
/// pointer if it doesn't already exist.
AliasSet::PointerRec &getEntryFor(Value *V) {
AliasSet::PointerRec *&Entry = PointerMap[ASTCallbackVH(V, this)];
if (!Entry)
Entry = new AliasSet::PointerRec(V);
return *Entry;
}
AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
AliasSet::AccessLattice E,
bool &NewSet) {
NewSet = false;
AliasSet &AS = getAliasSetForPointer(P, Size, AAInfo, &NewSet);
AS.Access |= E;
return AS;
}
AliasSet *mergeAliasSetsForPointer(const Value *Ptr, uint64_t Size,
const AAMDNodes &AAInfo);
AliasSet *findAliasSetForUnknownInst(Instruction *Inst);
};
inline raw_ostream& operator<<(raw_ostream &OS, const AliasSetTracker &AST) {
AST.print(OS);
return OS;
}
} // End llvm namespace
#endif

View File

@ -1,174 +0,0 @@
//===- llvm/Analysis/AssumptionCache.h - Track @llvm.assume ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that keeps track of @llvm.assume intrinsics in
// the functions of a module (allowing assumptions within any function to be
// found cheaply by other parts of the optimizer).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_ASSUMPTIONCACHE_H
#define LLVM_ANALYSIS_ASSUMPTIONCACHE_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>
namespace llvm {
/// \brief A cache of @llvm.assume calls within a function.
///
/// This cache provides fast lookup of assumptions within a function by caching
/// them and amortizing the cost of scanning for them across all queries. The
/// cache is also conservatively self-updating so that it will never return
/// incorrect results about a function even as the function is being mutated.
/// However, flushing the cache and rebuilding it (or explicitly updating it)
/// may allow it to discover new assumptions.
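///
/// A typical walk over the cached assumptions (a sketch of hypothetical
/// client code; null handles must be skipped):
/// \code
///   AssumptionCache AC(F);
///   for (auto &VH : AC.assumptions())
///     if (auto *CI = dyn_cast_or_null<CallInst>(VH))
///       ; // CI is a call to @llvm.assume
/// \endcode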
class AssumptionCache {
/// \brief The function for which this cache is handling assumptions.
///
/// We track this to lazily populate our assumptions.
Function &F;
/// \brief Vector of weak value handles to calls of the @llvm.assume
/// intrinsic.
SmallVector<WeakVH, 4> AssumeHandles;
/// \brief Flag tracking whether we have scanned the function yet.
///
/// We want to be as lazy about this as possible, and so we scan the function
/// at the last moment.
bool Scanned;
/// \brief Scan the function for assumptions and add them to the cache.
void scanFunction();
public:
/// \brief Construct an AssumptionCache from a function by scanning all of
/// its instructions.
AssumptionCache(Function &F) : F(F), Scanned(false) {}
/// \brief Add an @llvm.assume intrinsic to this function's cache.
///
/// The call passed in must be an instruction within this function and must
/// not already be in the cache.
void registerAssumption(CallInst *CI);
/// \brief Clear the cache of @llvm.assume intrinsics for a function.
///
/// It will be re-scanned the next time it is requested.
void clear() {
AssumeHandles.clear();
Scanned = false;
}
/// \brief Access the list of assumption handles currently tracked for this
/// function.
///
/// Note that these produce weak handles that may be null. The caller must
/// handle that case.
/// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
/// when we can write that to filter out the null values. Then caller code
/// will become simpler.
MutableArrayRef<WeakVH> assumptions() {
if (!Scanned)
scanFunction();
return AssumeHandles;
}
};
/// \brief A function analysis which provides an \c AssumptionCache.
///
/// This analysis is intended for use with the new pass manager and will vend
/// assumption caches for a given function.
class AssumptionAnalysis : public AnalysisInfoMixin<AssumptionAnalysis> {
friend AnalysisInfoMixin<AssumptionAnalysis>;
static char PassID;
public:
typedef AssumptionCache Result;
AssumptionAnalysis() {}
AssumptionAnalysis(const AssumptionAnalysis &Arg) {}
AssumptionAnalysis(AssumptionAnalysis &&Arg) {}
AssumptionAnalysis &operator=(const AssumptionAnalysis &RHS) { return *this; }
AssumptionAnalysis &operator=(AssumptionAnalysis &&RHS) { return *this; }
AssumptionCache run(Function &F, FunctionAnalysisManager &) {
return AssumptionCache(F);
}
};
/// \brief Printer pass for the \c AssumptionAnalysis results.
class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
raw_ostream &OS;
public:
explicit AssumptionPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
};
/// \brief An immutable pass that tracks lazily created \c AssumptionCache
/// objects.
///
/// This is essentially a workaround for the legacy pass manager's weaknesses:
/// it associates each assumption cache with a Function and clears it if the
/// function is deleted. The nature of the AssumptionCache is that it is not
/// invalidated by any changes to the function body, and so this is sufficient
/// to be conservatively correct.
class AssumptionCacheTracker : public ImmutablePass {
/// A callback value handle applied to function objects, which we use to
/// delete our cache of intrinsics for a function when it is deleted.
class FunctionCallbackVH final : public CallbackVH {
AssumptionCacheTracker *ACT;
void deleted() override;
public:
typedef DenseMapInfo<Value *> DMI;
FunctionCallbackVH(Value *V, AssumptionCacheTracker *ACT = nullptr)
: CallbackVH(V), ACT(ACT) {}
};
friend FunctionCallbackVH;
typedef DenseMap<FunctionCallbackVH, std::unique_ptr<AssumptionCache>,
FunctionCallbackVH::DMI> FunctionCallsMap;
FunctionCallsMap AssumptionCaches;
public:
/// \brief Get the cached assumptions for a function.
///
/// If no assumptions are cached, this will scan the function. Otherwise, the
/// existing cache will be returned.
AssumptionCache &getAssumptionCache(Function &F);
AssumptionCacheTracker();
~AssumptionCacheTracker() override;
void releaseMemory() override { AssumptionCaches.shrink_and_clear(); }
void verifyAnalysis() const override;
bool doFinalization(Module &) override {
verifyAnalysis();
return false;
}
static char ID; // Pass identification, replacement for typeid
};
} // end namespace llvm
#endif

View File

@ -1,235 +0,0 @@
//===- BasicAliasAnalysis.h - Stateless, local Alias Analysis ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for LLVM's primary stateless and local alias analysis.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_BASICALIASANALYSIS_H
#define LLVM_ANALYSIS_BASICALIASANALYSIS_H
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
class AssumptionCache;
class DominatorTree;
class LoopInfo;
/// This is the AA result object for the basic, local, and stateless alias
/// analysis. It implements the AA query interface in an entirely stateless
/// manner. As one consequence, it is never invalidated. While it does retain
/// some storage, that is used as an optimization and not to preserve
/// information from query to query.
class BasicAAResult : public AAResultBase<BasicAAResult> {
friend AAResultBase<BasicAAResult>;
const DataLayout &DL;
const TargetLibraryInfo &TLI;
AssumptionCache &AC;
DominatorTree *DT;
LoopInfo *LI;
public:
BasicAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI,
AssumptionCache &AC, DominatorTree *DT = nullptr,
LoopInfo *LI = nullptr)
: AAResultBase(), DL(DL), TLI(TLI), AC(AC), DT(DT), LI(LI) {}
BasicAAResult(const BasicAAResult &Arg)
: AAResultBase(Arg), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
LI(Arg.LI) {}
BasicAAResult(BasicAAResult &&Arg)
: AAResultBase(std::move(Arg)), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC),
DT(Arg.DT), LI(Arg.LI) {}
/// Handle invalidation events from the new pass manager.
///
/// By definition, this result is stateless and so remains valid.
bool invalidate(Function &, const PreservedAnalyses &) { return false; }
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
/// Chases pointers to determine whether they ultimately reach a constant
/// global or not.
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
/// Get the location associated with a pointer argument of a callsite.
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
/// Returns the behavior when calling the given call site.
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
/// Returns the behavior when calling the given function. For use when the
/// call site is not known.
FunctionModRefBehavior getModRefBehavior(const Function *F);
private:
// A linear transformation of a Value; this class represents ZExt(SExt(V,
// SExtBits), ZExtBits) * Scale + Offset.
struct VariableGEPIndex {
// An opaque Value - we can't decompose this further.
const Value *V;
// We need to track what extensions we've done as we consider the same Value
// with different extensions as different variables in a GEP's linear
// expression;
// e.g.: if V == -1, then sext(V) != zext(V).
unsigned ZExtBits;
unsigned SExtBits;
int64_t Scale;
bool operator==(const VariableGEPIndex &Other) const {
return V == Other.V && ZExtBits == Other.ZExtBits &&
SExtBits == Other.SExtBits && Scale == Other.Scale;
}
bool operator!=(const VariableGEPIndex &Other) const {
return !operator==(Other);
}
};
// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct DecomposedGEP {
// Base pointer of the GEP
const Value *Base;
// Total constant offset w.r.t the base from indexing into structs
int64_t StructOffset;
// Total constant offset w.r.t the base from indexing through
// pointers/arrays/vectors
int64_t OtherOffset;
// Scaled variable (non-constant) indices.
SmallVector<VariableGEPIndex, 4> VarIndices;
};
/// Track alias queries to guard against recursion.
typedef std::pair<MemoryLocation, MemoryLocation> LocPair;
typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
AliasCacheTy AliasCache;
/// Tracks phi nodes we have visited.
///
/// When interpreting "Value" pointer equality as value equality we need to
/// make sure that the "Value" is not part of a cycle. Otherwise, two uses
/// could come from different "iterations" of a cycle and see different values
/// for the same "Value" pointer.
///
/// The following example shows the problem:
/// %p = phi(%alloca1, %addr2)
/// %l = load %ptr
/// %addr1 = gep, %alloca2, 0, %l
/// %addr2 = gep %alloca2, 0, (%l + 1)
/// alias(%p, %addr1) -> MayAlias !
/// store %l, ...
SmallPtrSet<const BasicBlock *, 8> VisitedPhiBBs;
/// Tracks instructions visited by pointsToConstantMemory.
SmallPtrSet<const Value *, 16> Visited;
static const Value *
GetLinearExpression(const Value *V, APInt &Scale, APInt &Offset,
unsigned &ZExtBits, unsigned &SExtBits,
const DataLayout &DL, unsigned Depth, AssumptionCache *AC,
DominatorTree *DT, bool &NSW, bool &NUW);
static bool DecomposeGEPExpression(const Value *V, DecomposedGEP &Decomposed,
const DataLayout &DL, AssumptionCache *AC, DominatorTree *DT);
static bool isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
uint64_t ObjectAccessSize);
/// \brief A heuristic for aliasGEP that searches for a constant offset
/// between the variables.
///
/// GetLinearExpression has some limitations, as generally zext(%x + 1)
/// != zext(%x) + zext(1) if the arithmetic overflows. GetLinearExpression
/// will therefore conservatively refuse to decompose these expressions.
/// However, we know that, for all %x, zext(%x) != zext(%x + 1), even if
/// the addition overflows.
bool
constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
uint64_t V1Size, uint64_t V2Size, int64_t BaseOffset,
AssumptionCache *AC, DominatorTree *DT);
bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
const SmallVectorImpl<VariableGEPIndex> &Src);
AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
const AAMDNodes &V1AAInfo, const Value *V2,
uint64_t V2Size, const AAMDNodes &V2AAInfo,
const Value *UnderlyingV1, const Value *UnderlyingV2);
AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
uint64_t V2Size, const AAMDNodes &V2AAInfo);
AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
const AAMDNodes &SIAAInfo, const Value *V2,
uint64_t V2Size, const AAMDNodes &V2AAInfo);
AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
const Value *V2, uint64_t V2Size, AAMDNodes V2AATag);
};
/// Analysis pass providing a never-invalidated alias analysis result.
class BasicAA : public AnalysisInfoMixin<BasicAA> {
friend AnalysisInfoMixin<BasicAA>;
static char PassID;
public:
typedef BasicAAResult Result;
BasicAAResult run(Function &F, AnalysisManager<Function> &AM);
};
/// Legacy wrapper pass to provide the BasicAAResult object.
class BasicAAWrapperPass : public FunctionPass {
std::unique_ptr<BasicAAResult> Result;
virtual void anchor();
public:
static char ID;
BasicAAWrapperPass();
BasicAAResult &getResult() { return *Result; }
const BasicAAResult &getResult() const { return *Result; }
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
FunctionPass *createBasicAAWrapperPass();
/// A helper for the legacy pass manager to create a \c BasicAAResult object
/// populated to the best of our ability for a particular function when inside
/// of a \c ModulePass or a \c CallGraphSCCPass.
BasicAAResult createLegacyPMBasicAAResult(Pass &P, Function &F);
}
#endif
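As a usage sketch (not part of the original header): a legacy pass can request BasicAAWrapperPass and issue queries through its result. This assumes BasicAAResult exposes the usual public alias(MemoryLocation, MemoryLocation) entry point of the AA framework (declared earlier in this header, above the excerpt shown); the pass name QueryBasicAA is hypothetical.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
// Hypothetical demo pass: ask BasicAA about every pair of loaded pointers.
struct QueryBasicAA : FunctionPass {
  static char ID;
  QueryBasicAA() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BasicAAWrapperPass>(); // declared above
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    BasicAAResult &AA = getAnalysis<BasicAAWrapperPass>().getResult();
    SmallVector<const Value *, 8> Ptrs;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *LI = dyn_cast<LoadInst>(&I))
          Ptrs.push_back(LI->getPointerOperand());
    for (unsigned i = 0, e = Ptrs.size(); i != e; ++i)
      for (unsigned j = i + 1; j != e; ++j)
        (void)AA.alias(MemoryLocation(Ptrs[i]), MemoryLocation(Ptrs[j]));
    return false; // analysis only; the IR is not modified
  }
};
}
char QueryBasicAA::ID = 0;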

View File

@@ -1,130 +0,0 @@
//===- BlockFrequencyInfo.h - Block Frequency Analysis ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
#define LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
#include "llvm/ADT/Optional.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include <climits>
namespace llvm {
class BranchProbabilityInfo;
class LoopInfo;
template <class BlockT> class BlockFrequencyInfoImpl;
/// BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to
/// estimate IR basic block frequencies.
class BlockFrequencyInfo {
typedef BlockFrequencyInfoImpl<BasicBlock> ImplType;
std::unique_ptr<ImplType> BFI;
void operator=(const BlockFrequencyInfo &) = delete;
BlockFrequencyInfo(const BlockFrequencyInfo &) = delete;
public:
BlockFrequencyInfo();
BlockFrequencyInfo(const Function &F, const BranchProbabilityInfo &BPI,
const LoopInfo &LI);
BlockFrequencyInfo(BlockFrequencyInfo &&Arg);
BlockFrequencyInfo &operator=(BlockFrequencyInfo &&RHS);
~BlockFrequencyInfo();
const Function *getFunction() const;
const BranchProbabilityInfo *getBPI() const;
void view() const;
/// getBlockFreq - Return block frequency. Return 0 if we don't have the
/// information. Please note that the initial frequency is equal to
/// ENTRY_FREQ. This means the value itself is not meaningful in isolation;
/// only its comparison to other block frequencies is. We do this to avoid
/// using floating-point arithmetic.
BlockFrequency getBlockFreq(const BasicBlock *BB) const;
/// \brief Returns the estimated profile count of \p BB.
/// This computes the relative block frequency of \p BB and multiplies it by
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getBlockProfileCount(const BasicBlock *BB) const;
// Set the frequency of the given basic block.
void setBlockFreq(const BasicBlock *BB, uint64_t Freq);
/// calculate - compute block frequency info for the given function.
void calculate(const Function &F, const BranchProbabilityInfo &BPI,
const LoopInfo &LI);
// Print the block frequency Freq to OS using the current function's entry
// frequency to convert Freq into a relative decimal form.
raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
// Convenience method that attempts to look up the frequency associated with
// BB and print it to OS.
raw_ostream &printBlockFreq(raw_ostream &OS, const BasicBlock *BB) const;
uint64_t getEntryFreq() const;
void releaseMemory();
void print(raw_ostream &OS) const;
};
/// \brief Analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyAnalysis
: public AnalysisInfoMixin<BlockFrequencyAnalysis> {
friend AnalysisInfoMixin<BlockFrequencyAnalysis>;
static char PassID;
public:
/// \brief Provide the result typedef for this analysis pass.
typedef BlockFrequencyInfo Result;
/// \brief Run the analysis pass over a function and produce BFI.
Result run(Function &F, AnalysisManager<Function> &AM);
};
/// \brief Printer pass for the \c BlockFrequencyInfo results.
class BlockFrequencyPrinterPass
: public PassInfoMixin<BlockFrequencyPrinterPass> {
raw_ostream &OS;
public:
explicit BlockFrequencyPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
};
/// \brief Legacy analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyInfoWrapperPass : public FunctionPass {
BlockFrequencyInfo BFI;
public:
static char ID;
BlockFrequencyInfoWrapperPass();
~BlockFrequencyInfoWrapperPass() override;
BlockFrequencyInfo &getBFI() { return BFI; }
const BlockFrequencyInfo &getBFI() const { return BFI; }
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void print(raw_ostream &OS, const Module *M) const override;
};
}
#endif
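A minimal usage sketch (not in the original header), relying only on the wrapper pass and printBlockFreq declared above; the pass name DumpBlockFreq is hypothetical.

#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
// Hypothetical demo pass: print each block's frequency in relative form.
struct DumpBlockFreq : FunctionPass {
  static char ID;
  DumpBlockFreq() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    BlockFrequencyInfo &BFI =
        getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
    for (BasicBlock &BB : F) {
      errs() << BB.getName() << ": ";
      BFI.printBlockFreq(errs(), &BB) << "\n"; // decimal, relative to entry
    }
    return false;
  }
};
}
char DumpBlockFreq::ID = 0;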

File diff suppressed because it is too large

View File

@@ -1,224 +0,0 @@
//===--- BranchProbabilityInfo.h - Branch Probability Analysis --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is used to evaluate branch probabilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
#define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
namespace llvm {
class LoopInfo;
class raw_ostream;
/// \brief Analysis providing branch probability information.
///
/// This is a function analysis which provides information on the relative
/// probabilities of each "edge" in the function's CFG where such an edge is
/// defined by a pair (PredBlock and an index in the successors). The
/// probability of an edge from one block is always relative to the
/// probabilities of other edges from the block. The probabilities of all edges
/// from a block sum to exactly one (100%).
/// We use a pair (PredBlock and an index in the successors) to uniquely
/// identify an edge, since we can have multiple edges from Src to Dst.
/// As an example, we can have a switch which jumps to Dst with value 0 and
/// value 10.
class BranchProbabilityInfo {
public:
BranchProbabilityInfo() {}
BranchProbabilityInfo(const Function &F, const LoopInfo &LI) {
calculate(F, LI);
}
BranchProbabilityInfo(BranchProbabilityInfo &&Arg)
: Probs(std::move(Arg.Probs)), LastF(Arg.LastF),
PostDominatedByUnreachable(std::move(Arg.PostDominatedByUnreachable)),
PostDominatedByColdCall(std::move(Arg.PostDominatedByColdCall)) {}
BranchProbabilityInfo &operator=(BranchProbabilityInfo &&RHS) {
releaseMemory();
Probs = std::move(RHS.Probs);
PostDominatedByColdCall = std::move(RHS.PostDominatedByColdCall);
PostDominatedByUnreachable = std::move(RHS.PostDominatedByUnreachable);
return *this;
}
void releaseMemory();
void print(raw_ostream &OS) const;
/// \brief Get an edge's probability, relative to other out-edges of the Src.
///
/// This routine provides access to the fractional probability between zero
/// (0%) and one (100%) of this edge executing, relative to other edges
/// leaving the 'Src' block. The returned probability is never zero, and can
/// only be one if the source block has only one successor.
BranchProbability getEdgeProbability(const BasicBlock *Src,
unsigned IndexInSuccessors) const;
/// \brief Get the probability of going from Src to Dst.
///
/// It returns the sum of all probabilities for edges from Src to Dst.
BranchProbability getEdgeProbability(const BasicBlock *Src,
const BasicBlock *Dst) const;
BranchProbability getEdgeProbability(const BasicBlock *Src,
succ_const_iterator Dst) const;
/// \brief Test if an edge is hot relative to other out-edges of the Src.
///
/// Check whether this edge out of the source block is 'hot'. We define hot
/// as having a relative probability >= 80%.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
/// \brief Retrieve the hot successor of a block if one exists.
///
/// Given a basic block, look through its successors and if one exists for
/// which \see isEdgeHot would return true, return that successor block.
const BasicBlock *getHotSucc(const BasicBlock *BB) const;
/// \brief Print an edge's probability.
///
/// Retrieves an edge's probability similarly to \see getEdgeProbability, but
/// then prints that probability to the provided stream. That stream is then
/// returned.
raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
const BasicBlock *Dst) const;
/// \brief Set the raw edge probability for the given edge.
///
/// This allows a pass to explicitly set the probability of an edge. It can
/// be used when updating the CFG to keep the branch probability information
/// consistent. Read the implementation of how these edge probabilities are
/// calculated carefully before using!
void setEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors,
BranchProbability Prob);
static BranchProbability getBranchProbStackProtector(bool IsLikely) {
static const BranchProbability LikelyProb((1u << 20) - 1, 1u << 20);
return IsLikely ? LikelyProb : LikelyProb.getCompl();
}
void calculate(const Function &F, const LoopInfo &LI);
/// Forget analysis results for the given basic block.
void eraseBlock(const BasicBlock *BB);
private:
void operator=(const BranchProbabilityInfo &) = delete;
BranchProbabilityInfo(const BranchProbabilityInfo &) = delete;
// We need to store CallbackVH's in order to correctly handle basic block
// removal.
class BasicBlockCallbackVH final : public CallbackVH {
BranchProbabilityInfo *BPI;
void deleted() override {
assert(BPI != nullptr);
BPI->eraseBlock(cast<BasicBlock>(getValPtr()));
BPI->Handles.erase(*this);
}
public:
BasicBlockCallbackVH(const Value *V, BranchProbabilityInfo *BPI=nullptr)
: CallbackVH(const_cast<Value *>(V)), BPI(BPI) {}
};
DenseSet<BasicBlockCallbackVH, DenseMapInfo<Value*>> Handles;
// Since we allow duplicate edges from one basic block to another, we use
// a pair (PredBlock and an index in the successors) to specify an edge.
typedef std::pair<const BasicBlock *, unsigned> Edge;
// Default weight value. Used when we don't have information about the edge.
// TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
// the successors have a weight yet. But it doesn't make sense when providing
// weight to an edge that may have siblings with non-zero weights. This can
// be handled various ways, but it's probably fine for an edge with unknown
// weight to just "inherit" the non-zero weight of an adjacent successor.
static const uint32_t DEFAULT_WEIGHT = 16;
DenseMap<Edge, BranchProbability> Probs;
/// \brief Track the last function we run over for printing.
const Function *LastF;
/// \brief Track the set of blocks that are post-dominated by an unreachable
/// terminator.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByUnreachable;
/// \brief Track the set of blocks that always lead to a cold call.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByColdCall;
bool calcUnreachableHeuristics(const BasicBlock *BB);
bool calcMetadataWeights(const BasicBlock *BB);
bool calcColdCallHeuristics(const BasicBlock *BB);
bool calcPointerHeuristics(const BasicBlock *BB);
bool calcLoopBranchHeuristics(const BasicBlock *BB, const LoopInfo &LI);
bool calcZeroHeuristics(const BasicBlock *BB);
bool calcFloatingPointHeuristics(const BasicBlock *BB);
bool calcInvokeHeuristics(const BasicBlock *BB);
};
/// \brief Analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityAnalysis
: public AnalysisInfoMixin<BranchProbabilityAnalysis> {
friend AnalysisInfoMixin<BranchProbabilityAnalysis>;
static char PassID;
public:
/// \brief Provide the result typedef for this analysis pass.
typedef BranchProbabilityInfo Result;
/// \brief Run the analysis pass over a function and produce BPI.
BranchProbabilityInfo run(Function &F, AnalysisManager<Function> &AM);
};
/// \brief Printer pass for the \c BranchProbabilityAnalysis results.
class BranchProbabilityPrinterPass
: public PassInfoMixin<BranchProbabilityPrinterPass> {
raw_ostream &OS;
public:
explicit BranchProbabilityPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
};
/// \brief Legacy analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityInfoWrapperPass : public FunctionPass {
BranchProbabilityInfo BPI;
public:
static char ID;
BranchProbabilityInfoWrapperPass() : FunctionPass(ID) {
initializeBranchProbabilityInfoWrapperPassPass(
*PassRegistry::getPassRegistry());
}
BranchProbabilityInfo &getBPI() { return BPI; }
const BranchProbabilityInfo &getBPI() const { return BPI; }
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void print(raw_ostream &OS, const Module *M = nullptr) const override;
};
}
#endif
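For illustration (not in the original header), the hot-edge queries above compose naturally; reportHotEdges is a hypothetical helper that assumes a BranchProbabilityInfo already computed, e.g. by the wrapper pass.

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Print every edge whose relative probability is >= 80% (see isEdgeHot).
static void reportHotEdges(const Function &F,
                           const BranchProbabilityInfo &BPI) {
  for (const BasicBlock &BB : F)
    if (const BasicBlock *Hot = BPI.getHotSucc(&BB))
      BPI.printEdgeProbability(errs(), &BB, Hot) << " (hot)\n";
}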

View File

@@ -1,94 +0,0 @@
//===-- Analysis/CFG.h - BasicBlock Analyses --------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs analyses on basic blocks, and instructions
// contained within basic blocks.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CFG_H
#define LLVM_ANALYSIS_CFG_H
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
namespace llvm {
class BasicBlock;
class DominatorTree;
class Function;
class Instruction;
class LoopInfo;
class TerminatorInst;
/// Analyze the specified function to find all of the loop backedges in the
/// function and return them. This is a relatively cheap (compared to
/// computing dominators and loop info) analysis.
///
/// The output is added to Result, as pairs of <from,to> edge info.
void FindFunctionBackedges(
const Function &F,
SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *> > &
Result);
/// Search for the specified successor of basic block BB and return its position
/// in the terminator instruction's list of successors. It is an error to call
/// this with a block that is not a successor.
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
/// Return true if the specified edge is a critical edge. Critical edges are
/// edges from a block with multiple successors to a block with multiple
/// predecessors.
///
bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
bool AllowIdenticalEdges = false);
/// \brief Determine whether instruction 'To' is reachable from 'From',
/// returning true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
/// Returns false only if we can prove that once 'From' has been executed then
/// 'To' can not be executed. Conservatively returns true.
///
/// This function is linear with respect to the number of blocks in the CFG,
/// walking down successors from From to reach To, with a fixed threshold.
/// Using DT or LI allows us to answer more quickly. LI reduces the cost of
/// an entire loop of any number of blocks to be the same as the cost of a
/// single block. DT reduces the cost by allowing the search to terminate when
/// we find a block that dominates the block containing 'To'. DT is most useful
/// on branchy code but not loops, and LI is most useful on code with loops but
/// does not help on branchy code outside loops.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// \brief Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
/// Returns false only if we can prove that once 'From' has been reached then
/// 'To' can not be executed. Conservatively returns true.
bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// \brief Determine whether there is at least one path from a block in
/// 'Worklist' to 'StopBB', returning true if uncertain.
///
/// Determine whether there is a path from at least one block in Worklist to
/// StopBB within a single function. Returns false only if we can prove that
/// once any block in 'Worklist' has been reached then 'StopBB' can not be
/// executed. Conservatively returns true.
bool isPotentiallyReachableFromMany(SmallVectorImpl<BasicBlock *> &Worklist,
BasicBlock *StopBB,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
} // End llvm namespace
#endif
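Two hypothetical helpers (not part of the header) showing typical use of the declarations above; note that isPotentiallyReachable answers true when uncertain, so only a false result is a proof.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/CFG.h"
using namespace llvm;

// Conservative query: could To execute after From? false == proven unreachable.
static bool mayExecuteAfter(const Instruction *From, const Instruction *To,
                            const DominatorTree *DT = nullptr,
                            const LoopInfo *LI = nullptr) {
  return isPotentiallyReachable(From, To, DT, LI);
}

// Gather all loop backedges cheaply, without computing dominators or LoopInfo.
static size_t countBackedges(const Function &F) {
  SmallVector<std::pair<const BasicBlock *, const BasicBlock *>, 8> Edges;
  FindFunctionBackedges(F, Edges);
  return Edges.size();
}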

View File

@@ -1,130 +0,0 @@
//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the CFG printer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CFGPRINTER_H
#define LLVM_ANALYSIS_CFGPRINTER_H
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/GraphWriter.h"
namespace llvm {
template<>
struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const Function *F) {
return "CFG for '" + F->getName().str() + "' function";
}
static std::string getSimpleNodeLabel(const BasicBlock *Node,
const Function *) {
if (!Node->getName().empty())
return Node->getName().str();
std::string Str;
raw_string_ostream OS(Str);
Node->printAsOperand(OS, false);
return OS.str();
}
static std::string getCompleteNodeLabel(const BasicBlock *Node,
const Function *) {
enum { MaxColumns = 80 };
std::string Str;
raw_string_ostream OS(Str);
if (Node->getName().empty()) {
Node->printAsOperand(OS, false);
OS << ":";
}
OS << *Node;
std::string OutStr = OS.str();
if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
// Process string output to make it nicer...
unsigned ColNum = 0;
unsigned LastSpace = 0;
for (unsigned i = 0; i != OutStr.length(); ++i) {
if (OutStr[i] == '\n') { // Left justify
OutStr[i] = '\\';
OutStr.insert(OutStr.begin()+i+1, 'l');
ColNum = 0;
LastSpace = 0;
} else if (OutStr[i] == ';') { // Delete comments!
unsigned Idx = OutStr.find('\n', i+1); // Find end of line
OutStr.erase(OutStr.begin()+i, OutStr.begin()+Idx);
--i;
} else if (ColNum == MaxColumns) { // Wrap lines.
// Wrap very long names even though we can't find a space.
if (!LastSpace)
LastSpace = i;
OutStr.insert(LastSpace, "\\l...");
ColNum = i - LastSpace;
LastSpace = 0;
i += 3; // The loop will advance 'i' again.
}
else
++ColNum;
if (OutStr[i] == ' ')
LastSpace = i;
}
return OutStr;
}
std::string getNodeLabel(const BasicBlock *Node,
const Function *Graph) {
if (isSimple())
return getSimpleNodeLabel(Node, Graph);
else
return getCompleteNodeLabel(Node, Graph);
}
static std::string getEdgeSourceLabel(const BasicBlock *Node,
succ_const_iterator I) {
// Label source of conditional branches with "T" or "F"
if (const BranchInst *BI = dyn_cast<BranchInst>(Node->getTerminator()))
if (BI->isConditional())
return (I == succ_begin(Node)) ? "T" : "F";
// Label source of switch edges with the associated value.
if (const SwitchInst *SI = dyn_cast<SwitchInst>(Node->getTerminator())) {
unsigned SuccNo = I.getSuccessorIndex();
if (SuccNo == 0) return "def";
std::string Str;
raw_string_ostream OS(Str);
SwitchInst::ConstCaseIt Case =
SwitchInst::ConstCaseIt::fromSuccessorIndex(SI, SuccNo);
OS << Case.getCaseValue()->getValue();
return OS.str();
}
return "";
}
};
} // End llvm namespace
namespace llvm {
class FunctionPass;
FunctionPass *createCFGPrinterPass ();
FunctionPass *createCFGOnlyPrinterPass ();
} // End llvm namespace
#endif
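A sketch of driving the specialization above by hand (not in the original header). It assumes GraphWriter's WriteGraph template and the GraphTraits for const Function* from llvm/IR/CFG.h, both pulled in by this header's includes; writeCFGToDotFile is hypothetical.

#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Emit "<function>.cfg.dot" using the DOTGraphTraits defined above.
static void writeCFGToDotFile(const Function &F) {
  std::error_code EC;
  raw_fd_ostream File((F.getName() + ".cfg.dot").str(), EC, sys::fs::F_Text);
  if (EC)
    return; // could not open the output file
  WriteGraph(File, &F); // uses getNodeLabel/getEdgeSourceLabel above
}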

View File

@@ -1,138 +0,0 @@
//=- CFLAndersAliasAnalysis.h - Inclusion-based Alias Analysis -----*- C++-*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for LLVM's inclusion-based alias analysis
/// implemented with CFL graph reachability.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H
#define LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <forward_list>
namespace llvm {
class TargetLibraryInfo;
namespace cflaa {
struct AliasSummary;
}
class CFLAndersAAResult : public AAResultBase<CFLAndersAAResult> {
friend AAResultBase<CFLAndersAAResult>;
class FunctionInfo;
public:
explicit CFLAndersAAResult(const TargetLibraryInfo &);
CFLAndersAAResult(CFLAndersAAResult &&);
~CFLAndersAAResult();
/// Handle invalidation events from the new pass manager.
/// By definition, this result is stateless and so remains valid.
bool invalidate(Function &, const PreservedAnalyses &) { return false; }
/// Evict the given function from cache
void evict(const Function &Fn);
/// \brief Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(const Function &);
AliasResult query(const MemoryLocation &, const MemoryLocation &);
AliasResult alias(const MemoryLocation &, const MemoryLocation &);
private:
struct FunctionHandle final : public CallbackVH {
FunctionHandle(Function *Fn, CFLAndersAAResult *Result)
: CallbackVH(Fn), Result(Result) {
assert(Fn != nullptr);
assert(Result != nullptr);
}
void deleted() override { removeSelfFromCache(); }
void allUsesReplacedWith(Value *) override { removeSelfFromCache(); }
private:
CFLAndersAAResult *Result;
void removeSelfFromCache() {
assert(Result != nullptr);
auto *Val = getValPtr();
Result->evict(*cast<Function>(Val));
setValPtr(nullptr);
}
};
/// \brief Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(const Function &);
/// \brief Inserts the given Function into the cache.
void scan(const Function &);
/// \brief Build summary for a given function
FunctionInfo buildInfoFrom(const Function &);
const TargetLibraryInfo &TLI;
/// \brief Cached mapping of Functions to their FunctionInfo summaries.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernible from a function
/// that simply has empty sets.
DenseMap<const Function *, Optional<FunctionInfo>> Cache;
std::forward_list<FunctionHandle> Handles;
};
/// Analysis pass providing a never-invalidated alias analysis result.
///
/// FIXME: We really should refactor CFL to use the analysis more heavily, and
/// in particular to leverage invalidation to trigger re-computation.
class CFLAndersAA : public AnalysisInfoMixin<CFLAndersAA> {
friend AnalysisInfoMixin<CFLAndersAA>;
static char PassID;
public:
typedef CFLAndersAAResult Result;
CFLAndersAAResult run(Function &F, AnalysisManager<Function> &AM);
};
/// Legacy wrapper pass to provide the CFLAndersAAResult object.
class CFLAndersAAWrapperPass : public ImmutablePass {
std::unique_ptr<CFLAndersAAResult> Result;
public:
static char ID;
CFLAndersAAWrapperPass();
CFLAndersAAResult &getResult() { return *Result; }
const CFLAndersAAResult &getResult() const { return *Result; }
void initializePass() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
//===--------------------------------------------------------------------===//
//
// createCFLAndersAAWrapperPass - This pass implements a set-based approach to
// alias analysis.
//
ImmutablePass *createCFLAndersAAWrapperPass();
}
#endif
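Usage sketch (not in the original header): in a legacy pipeline the wrapper is simply added ahead of the passes that query aliasing; addCFLAndersToPipeline is a hypothetical helper.

#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
#include "llvm/IR/LegacyPassManager.h"
using namespace llvm;

// Register CFL-Anders so later passes see it through the AA aggregation.
static void addCFLAndersToPipeline(legacy::PassManager &PM) {
  PM.add(createCFLAndersAAWrapperPass()); // declared above
  // ... add the transformation passes that should benefit from it ...
}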

View File

@@ -1,167 +0,0 @@
//=- CFLSteensAliasAnalysis.h - Unification-based Alias Analysis ---*- C++-*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for LLVM's unification-based alias analysis
/// implemented with CFL graph reachability.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H
#define LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <forward_list>
namespace llvm {
class TargetLibraryInfo;
namespace cflaa {
struct AliasSummary;
}
class CFLSteensAAResult : public AAResultBase<CFLSteensAAResult> {
friend AAResultBase<CFLSteensAAResult>;
class FunctionInfo;
public:
explicit CFLSteensAAResult(const TargetLibraryInfo &);
CFLSteensAAResult(CFLSteensAAResult &&Arg);
~CFLSteensAAResult();
/// Handle invalidation events from the new pass manager.
///
/// By definition, this result is stateless and so remains valid.
bool invalidate(Function &, const PreservedAnalyses &) { return false; }
/// \brief Inserts the given Function into the cache.
void scan(Function *Fn);
void evict(Function *Fn);
/// \brief Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(Function *Fn);
/// \brief Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(Function &Fn);
AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB);
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
if (LocA.Ptr == LocB.Ptr)
return LocA.Size == LocB.Size ? MustAlias : PartialAlias;
// Comparisons between global variables and other constants should be
// handled by BasicAA.
// CFLSteensAA may report NoAlias when comparing a GlobalValue and
// ConstantExpr, but every query needs to have at least one Value tied to a
// Function, and neither GlobalValues nor ConstantExprs are.
if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr))
return AAResultBase::alias(LocA, LocB);
AliasResult QueryResult = query(LocA, LocB);
if (QueryResult == MayAlias)
return AAResultBase::alias(LocA, LocB);
return QueryResult;
}
/// Get the location associated with a pointer argument of a callsite.
ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
/// Returns the behavior when calling the given call site.
FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
/// Returns the behavior when calling the given function. For use when the
/// call site is not known.
FunctionModRefBehavior getModRefBehavior(const Function *F);
private:
struct FunctionHandle final : public CallbackVH {
FunctionHandle(Function *Fn, CFLSteensAAResult *Result)
: CallbackVH(Fn), Result(Result) {
assert(Fn != nullptr);
assert(Result != nullptr);
}
void deleted() override { removeSelfFromCache(); }
void allUsesReplacedWith(Value *) override { removeSelfFromCache(); }
private:
CFLSteensAAResult *Result;
void removeSelfFromCache() {
assert(Result != nullptr);
auto *Val = getValPtr();
Result->evict(cast<Function>(Val));
setValPtr(nullptr);
}
};
const TargetLibraryInfo &TLI;
/// \brief Cached mapping of Functions to their StratifiedSets.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernible from a function
/// that simply has empty sets.
DenseMap<Function *, Optional<FunctionInfo>> Cache;
std::forward_list<FunctionHandle> Handles;
FunctionInfo buildSetsFrom(Function *F);
};
/// Analysis pass providing a never-invalidated alias analysis result.
///
/// FIXME: We really should refactor CFL to use the analysis more heavily, and
/// in particular to leverage invalidation to trigger re-computation of sets.
class CFLSteensAA : public AnalysisInfoMixin<CFLSteensAA> {
friend AnalysisInfoMixin<CFLSteensAA>;
static char PassID;
public:
typedef CFLSteensAAResult Result;
CFLSteensAAResult run(Function &F, AnalysisManager<Function> &AM);
};
/// Legacy wrapper pass to provide the CFLSteensAAResult object.
class CFLSteensAAWrapperPass : public ImmutablePass {
std::unique_ptr<CFLSteensAAResult> Result;
public:
static char ID;
CFLSteensAAWrapperPass();
CFLSteensAAResult &getResult() { return *Result; }
const CFLSteensAAResult &getResult() const { return *Result; }
void initializePass() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
//===--------------------------------------------------------------------===//
//
// createCFLSteensAAWrapperPass - This pass implements a set-based approach to
// alias analysis.
//
ImmutablePass *createCFLSteensAAWrapperPass();
}
#endif
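And a new-pass-manager sketch (not in the original header, and assuming the convention of LLVM of this vintage that CFLSteensAA::run fetches TargetLibraryInfo from the analysis manager); getSteensResult is hypothetical.

#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

// Register the analyses, then force computation of the Steensgaard result.
static CFLSteensAAResult &getSteensResult(Function &F,
                                          AnalysisManager<Function> &FAM) {
  FAM.registerPass([] { return TargetLibraryAnalysis(); }); // dependency
  FAM.registerPass([] { return CFLSteensAA(); });
  return FAM.getResult<CFLSteensAA>(F);
}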

Some files were not shown because too many files have changed in this diff