@@ -91,7 +91,7 @@ static void EmitMemZero(IRBuilder<> &B, Value *Dst, Value *Len,
 
 // namespace {
 
-Value* FunctionInfo::promote(LLCallBasePtr CB, IRBuilder<> &B, const G2StackAnalysis &A) {
+Value* FunctionInfo::promote(CallBase *CB, IRBuilder<> &B, const G2StackAnalysis &A) {
   NumGcToStack++;
 
   auto &BB = CB->getCaller()->getEntryBlock();
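A note on the recurring signature change (editorial gloss, not part of the commit): llvm::CallBase is the common base class of CallInst and InvokeInst, so taking a CallBase * lets each helper handle both call and invoke sites uniformly; LLCallBasePtr looks like a local compatibility alias for that same pointer type. A minimal sketch of the idea, with the alias spelling assumed:

    #include "llvm/IR/InstrTypes.h"   // llvm::CallBase
    #include "llvm/IR/Instructions.h" // llvm::CallInst, llvm::InvokeInst

    // Hypothetical alias that this commit spells out as the underlying type:
    // using LLCallBasePtr = llvm::CallBase *;

    // One function covers both kinds of call site, since CallBase is the
    // common base of CallInst and InvokeInst:
    static llvm::Value *calledOperandOf(llvm::CallBase *CB) {
      return CB->getCalledOperand(); // valid for calls and invokes alike
    }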
@@ -128,7 +128,7 @@ static bool isKnownLessThan(Value *Val, uint64_t Limit, const G2StackAnalysis &A
   return true;
 }
 
-bool TypeInfoFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
+bool TypeInfoFI::analyze(CallBase *CB, const G2StackAnalysis &A) {
   Value *TypeInfo = CB->getArgOperand(TypeInfoArgNr);
   Ty = A.getTypeFor(TypeInfo, 0);
   if (!Ty) {
@@ -137,7 +137,7 @@ bool TypeInfoFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
   return A.DL.getTypeAllocSize(Ty) < SizeLimit;
 }
 
-bool ArrayFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
+bool ArrayFI::analyze(CallBase *CB, const G2StackAnalysis &A) {
   if (!TypeInfoFI::analyze(CB, A)) {
     return false;
   }
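Both analyze routines above gate promotion on the type's statically known size. Worth spelling out (my gloss, not the commit's): DataLayout::getTypeAllocSize is the right query here because it includes alignment padding, i.e. the bytes a stack slot actually occupies, unlike getTypeStoreSize. A small illustration:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"

    // For a struct type like { i32, i8 } with 4-byte alignment:
    //   getTypeStoreSize(Ty) -> 5  (bytes a store writes)
    //   getTypeAllocSize(Ty) -> 8  (bytes a stack slot occupies)
    // The pass compares the alloc size against SizeLimit because that is
    // the real stack cost of promoting the allocation.
    static bool smallEnoughToPromote(const llvm::DataLayout &DL,
                                     llvm::Type *Ty, uint64_t SizeLimit) {
      return DL.getTypeAllocSize(Ty) < SizeLimit;
    }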
@@ -160,7 +160,7 @@ bool ArrayFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
   return true;
 }
 
-Value* ArrayFI::promote(LLCallBasePtr CB, IRBuilder<> &B, const G2StackAnalysis &A) {
+Value* ArrayFI::promote(CallBase *CB, IRBuilder<> &B, const G2StackAnalysis &A) {
   // If the allocation is of constant size it's best to put it in the
   // entry block, so do so if we're not already there.
   // For dynamically-sized allocations it's best to avoid the overhead
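The comment above describes a standard LLVM idiom: an alloca with a constant size placed in the entry block is a static alloca that codegen folds into the fixed stack frame, while a dynamically-sized alloca adjusts the stack pointer at runtime and is better emitted at its point of use. A hedged sketch of that placement logic (illustrative names, not the commit's code):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Constant-sized allocations go to the top of the entry block (a static
    // alloca, folded into the frame); dynamically-sized ones stay at the
    // current insertion point to avoid a stack adjustment on every path.
    static AllocaInst *placeAlloca(IRBuilder<> &B, Type *Ty, Value *ArraySize,
                                   Function &F) {
      if (isa<ConstantInt>(ArraySize)) {
        IRBuilder<> EntryB(&F.getEntryBlock(), F.getEntryBlock().begin());
        return EntryB.CreateAlloca(Ty, ArraySize);
      }
      return B.CreateAlloca(Ty, ArraySize);
    }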
@@ -203,7 +203,7 @@ Value* ArrayFI::promote(LLCallBasePtr CB, IRBuilder<> &B, const G2StackAnalysis
 
   return alloca;
 }
-bool AllocClassFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
+bool AllocClassFI::analyze(CallBase *CB, const G2StackAnalysis &A) {
   if (CB->arg_size() != 1) {
     return false;
   }
@@ -238,7 +238,7 @@ bool AllocClassFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
            ->getType();
   return A.DL.getTypeAllocSize(Ty) < SizeLimit;
 }
-bool UntypedMemoryFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
+bool UntypedMemoryFI::analyze(CallBase *CB, const G2StackAnalysis &A) {
   if (CB->arg_size() < SizeArgNr + 1) {
     return false;
   }
@@ -260,7 +260,7 @@ bool UntypedMemoryFI::analyze(LLCallBasePtr CB, const G2StackAnalysis &A) {
   Ty = llvm::Type::getInt8Ty(CB->getContext());
   return true;
 }
-Value* UntypedMemoryFI::promote(LLCallBasePtr CB, IRBuilder<> &B, const G2StackAnalysis &A) {
+Value* UntypedMemoryFI::promote(CallBase *CB, IRBuilder<> &B, const G2StackAnalysis &A) {
   // If the allocation is of constant size it's best to put it in the
   // entry block, so do so if we're not already there.
   // For dynamically-sized allocations it's best to avoid the overhead
@@ -341,7 +341,7 @@ GarbageCollect2Stack::GarbageCollect2Stack()
       NewArrayT(ReturnType::Array, 0, 1, true), AllocMemory(0) {
 }
 
-static void RemoveCall(LLCallBasePtr CB, const G2StackAnalysis &A) {
+static void RemoveCall(CallBase *CB, const G2StackAnalysis &A) {
   // For an invoke instruction, we insert a branch to the normal target BB
   // immediately before it. Ideally, we would find a way to not invalidate
   // the dominator tree here.
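The comment above refers to the usual pattern for deleting an invoke: unlike a plain call, an invoke terminates its basic block, so a branch to the normal destination has to take over that role, and the edge to the unwind destination must be removed from its PHI nodes. A rough sketch of that pattern (assumptions noted in comments; not the commit's exact code):

    #include <cassert>
    #include "llvm/IR/InstrTypes.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Remove a dead call site while keeping the CFG valid. Assumes all
    // uses of the result were already replaced.
    static void removeCallSite(CallBase *CB) {
      assert(CB->use_empty() && "replace all uses before removing the call");
      if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
        // An invoke is a terminator: branch to the normal destination in
        // its place, and update PHIs in the dropped unwind destination.
        BranchInst::Create(Invoke->getNormalDest(), Invoke);
        Invoke->getUnwindDest()->removePredecessor(Invoke->getParent());
      }
      CB->eraseFromParent();
    }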