llvm.org GIT mirror llvm / a5703bc
[PlaceSafepoints] Stop special casing some intrinsics

We were special casing a handful of intrinsics as not needing a safepoint
before them. After running into another valid case - memset - I took a closer
look and realized that almost no intrinsics need to have a safepoint poll
before them. Restructure the code to make that apparent so that we stop
hitting these bugs. The only intrinsics which need a safepoint poll before
them are ones which can run arbitrary code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@237744 91177308-0d34-0410-b5e6-96231b3b80d8

Philip Reames, 4 years ago
2 changed file(s) with 56 addition(s) and 17 deletion(s).
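For illustration, here is a minimal IR sketch of the rule the commit message describes (this module is not part of the change; @example and @callee are made-up names, and the poll body follows the same pattern as the test added below): the entry poll is expected to be allowed past a leaf intrinsic such as llvm.assume, but must still land before the first call that can run arbitrary code.

; Illustrative only: the entry poll is expected before @callee, not before
; the llvm.assume intrinsic, which never expands to a real call.
declare void @llvm.assume(i1)
declare void @callee()

define void @example(i1 %cond) gc "statepoint-example" {
  call void @llvm.assume(i1 %cond)
  ; A poll is needed before this call: @callee may recurse or grow the stack
  ; by an unbounded amount.
  call void @callee()
  ret void
}

; A trivial poll body, written the same way as in the new test below.
declare void @do_safepoint()
define void @gc.safepoint_poll() {
  call void @do_safepoint()
  ret void
}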
   return false;
 }

+/// Returns true if an entry safepoint is not required before this callsite in
+/// the caller function.
+static bool doesNotRequireEntrySafepointBefore(const CallSite &CS) {
+  Instruction *Inst = CS.getInstruction();
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::experimental_gc_statepoint:
+    case Intrinsic::experimental_patchpoint_void:
+    case Intrinsic::experimental_patchpoint_i64:
+      // These can wrap an actual call which may grow the stack by an
+      // unbounded amount or run forever.
+      return false;
+    default:
+      // Most LLVM intrinsics are things which do not expand to actual calls,
+      // or at least if they do, are leaf functions that cause only finite
+      // stack growth. In particular, the optimizer likes to form things like
+      // memsets out of stores in the original IR. Another important example
+      // is llvm.frameescape which must occur in the entry block. Inserting a
+      // safepoint before it is not legal since it could push the frameescape
+      // out of the entry block.
+      return true;
+    }
+  }
+  return false;
+}
+
 static Instruction *findLocationForEntrySafepoint(Function &F,
                                                   DominatorTree &DT) {

...

   for (cursor = F.getEntryBlock().begin(); hasNextInstruction(cursor);
        cursor = nextInstruction(cursor)) {

-    // We need to stop going forward as soon as we see a call that can
-    // grow the stack (i.e. the call target has a non-zero frame
-    // size).
-    if (CallSite(cursor)) {
-      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(cursor)) {
-        // llvm.assume(...) are not really calls.
-        if (II->getIntrinsicID() == Intrinsic::assume) {
-          continue;
-        }
-        // llvm.frameescape() intrinsic is not a real call. The intrinsic can
-        // exist only in the entry block.
-        // Inserting a statepoint before llvm.frameescape() may split the
-        // entry block, and push the intrinsic out of the entry block.
-        if (II->getIntrinsicID() == Intrinsic::frameescape) {
-          continue;
-        }
-      }
+    // We need to ensure a safepoint poll occurs before any 'real' call. The
+    // easiest way to ensure finite execution between safepoints in the face
+    // of recursive and mutually recursive functions is to enforce that each
+    // take a safepoint. Additionally, we need to ensure a poll before any
+    // call which can grow the stack by an unbounded amount. This isn't
+    // required for GC semantics per se, but is a common requirement for
+    // languages which detect stack overflow via guard pages and then throw
+    // exceptions.
+    if (auto CS = CallSite(cursor)) {
+      if (doesNotRequireEntrySafepointBefore(CS))
+        continue;
       break;
     }
   }
+; RUN: opt -S -place-safepoints %s | FileCheck %s
+
+define void @test(i32, i8 addrspace(1)* %ptr) gc "statepoint-example" {
+; CHECK-LABEL: @test
+; CHECK-NEXT: llvm.memset
+; CHECK: do_safepoint
+; CHECK: @foo
+  call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %ptr, i8 0, i64 24, i32 8, i1 false)
+  call void @foo()
+  ret void
+}
+
+declare void @foo()
+declare void @llvm.memset.p1i8.i64(i8 addrspace(1)*, i8, i64, i32, i1)
+
+declare void @do_safepoint()
+define void @gc.safepoint_poll() {
+  call void @do_safepoint()
+  ret void
+}