| field | value |
|---|---|
| author | Josh Magee <joshua_magee@playstation.sony.com>, 2014-02-01 01:36:16 +0000 |
| committer | Josh Magee <joshua_magee@playstation.sony.com>, 2014-02-01 01:36:16 +0000 |
| commit | 24c7f063333b0654fb3d6667d0096ce97141b82e |
| tree | 7332ef67a673107e3d0cf9bcabe73dd2e9d743c4 (/llvm/test/CodeGen/X86/ssp-data-layout.ll) |
| parent | 31cb474e2d0dea9eb4298b6cbfe5887bbde7e3cf |
[stackprotector] Implement the sspstrong rules for stack layout.
This changes the PrologueEpilogInserter and LocalStackSlotAllocation passes to follow the extended stack layout rules for sspstrong and sspreq.
The sspstrong layout rules are:
1. Large arrays and structures containing large arrays (>= ssp-buffer-size) are closest to the stack protector.
2. Small arrays and structures containing small arrays (< ssp-buffer-size) are 2nd closest to the protector.
3. Variables that have had their address taken are 3rd closest to the protector.
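For illustration (not part of this patch): a minimal sketch of how locals fall into these groups, assuming the default ssp-buffer-size of 8. The function and callee names (@grouping_sketch, @use) are hypothetical, and the sketch uses the same typed-pointer IR syntax as the test below; a fourth, implicit group (everything else) ends up farthest from the protector.

; Hypothetical sketch of the sspstrong grouping rules; not taken from the patch.
define void @grouping_sketch() sspstrong {
entry:
  %large = alloca [8 x i8], align 1   ; rule 1: array >= ssp-buffer-size, placed closest to the protector
  %small = alloca [2 x i8], align 1   ; rule 2: array < ssp-buffer-size, 2nd closest
  %taken = alloca i32, align 4        ; rule 3: address escapes via the call to @use below, 3rd closest
  %plain = alloca i32, align 4        ; everything else: accessed only by a direct store, farthest away
  %p0 = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
  %p1 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
  call void @use(i8* %p0, i8* %p1, i32* %taken)
  store i32 0, i32* %plain, align 4
  ret void
}
declare void @use(i8*, i8*, i32*)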
Differential Revision: http://llvm-reviews.chandlerc.com/D2546
llvm-svn: 200601
Diffstat (limited to 'llvm/test/CodeGen/X86/ssp-data-layout.ll')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/ssp-data-layout.ll | 280 |

1 file changed, 276 insertions, 4 deletions
diff --git a/llvm/test/CodeGen/X86/ssp-data-layout.ll b/llvm/test/CodeGen/X86/ssp-data-layout.ll
index 72194af96f1..e76ad7b871b 100644
--- a/llvm/test/CodeGen/X86/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/X86/ssp-data-layout.ll
@@ -21,7 +21,6 @@
 ; on a non-linux target the data layout rules are triggered.
 
 %struct.struct_large_char = type { [8 x i8] }
-%struct.struct_large_char2 = type { [2 x i8], [8 x i8] }
 %struct.struct_small_char = type { [2 x i8] }
 %struct.struct_large_nonchar = type { [8 x i32] }
 %struct.struct_small_nonchar = type { [2 x i16] }
@@ -170,6 +169,282 @@ entry:
   ret void
 }
 
+define void @layout_sspstrong() nounwind uwtable sspstrong {
+entry:
+; Expected stack layout for sspstrong is
+;   -48   large_nonchar          . Group 1, nested arrays,
+;   -56   large_char             .   arrays >= ssp-buffer-size
+;   -64   struct_large_char      .
+;   -96   struct_large_nonchar   .
+;  -100   small_non_char         | Group 2, nested arrays,
+;  -102   small_char             |   arrays < ssp-buffer-size
+;  -104   struct_small_char      |
+;  -112   struct_small_nonchar   |
+;  -116   addrof                 * Group 3, addr-of local
+;  -120   scalar                 + Group 4, everything else
+;  -124   scalar                 +
+;  -128   scalar                 +
+;
+; CHECK: layout_sspstrong:
+; CHECK: call{{l|q}} get_scalar1
+; CHECK: movl %eax, -120(
+; CHECK: call{{l|q}} end_scalar1
+
+; CHECK: call{{l|q}} get_scalar2
+; CHECK: movl %eax, -124(
+; CHECK: call{{l|q}} end_scalar2
+
+; CHECK: call{{l|q}} get_scalar3
+; CHECK: movl %eax, -128(
+; CHECK: call{{l|q}} end_scalar3
+
+; CHECK: call{{l|q}} get_addrof
+; CHECK: movl %eax, -116(
+; CHECK: call{{l|q}} end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: movw %ax, -100(
+; CHECK: call{{l|q}} end_small_nonchar
+
+; CHECK: call{{l|q}} get_large_nonchar
+; CHECK: movl %eax, -48(
+; CHECK: call{{l|q}} end_large_nonchar
+
+; CHECK: call{{l|q}} get_small_char
+; CHECK: movb %al, -102(
+; CHECK: call{{l|q}} end_small_char
+
+; CHECK: call{{l|q}} get_large_char
+; CHECK: movb %al, -56(
+; CHECK: call{{l|q}} end_large_char
+
+; CHECK: call{{l|q}} get_struct_large_char
+; CHECK: movb %al, -64(
+; CHECK: call{{l|q}} end_struct_large_char
+
+; CHECK: call{{l|q}} get_struct_small_char
+; CHECK: movb %al, -104(
+; CHECK: call{{l|q}} end_struct_small_char
+
+; CHECK: call{{l|q}} get_struct_large_nonchar
+; CHECK: movl %eax, -96(
+; CHECK: call{{l|q}} end_struct_large_nonchar
+
+; CHECK: call{{l|q}} get_struct_small_nonchar
+; CHECK: movw %ax, -112(
+; CHECK: call{{l|q}} end_struct_small_nonchar
+  %x = alloca i32, align 4
+  %y = alloca i32, align 4
+  %z = alloca i32, align 4
+  %ptr = alloca i32, align 4
+  %small2 = alloca [2 x i16], align 2
+  %large2 = alloca [8 x i32], align 16
+  %small = alloca [2 x i8], align 1
+  %large = alloca [8 x i8], align 1
+  %a = alloca %struct.struct_large_char, align 1
+  %b = alloca %struct.struct_small_char, align 1
+  %c = alloca %struct.struct_large_nonchar, align 8
+  %d = alloca %struct.struct_small_nonchar, align 2
+  %call = call i32 @get_scalar1()
+  store i32 %call, i32* %x, align 4
+  call void @end_scalar1()
+  %call1 = call i32 @get_scalar2()
+  store i32 %call1, i32* %y, align 4
+  call void @end_scalar2()
+  %call2 = call i32 @get_scalar3()
+  store i32 %call2, i32* %z, align 4
+  call void @end_scalar3()
+  %call3 = call i32 @get_addrof()
+  store i32 %call3, i32* %ptr, align 4
+  call void @end_addrof()
+  %call4 = call signext i16 @get_small_nonchar()
+  %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+  store i16 %call4, i16* %arrayidx, align 2
+  call void @end_small_nonchar()
+  %call5 = call i32 @get_large_nonchar()
+  %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+  store i32 %call5, i32* %arrayidx6, align 4
+  call void @end_large_nonchar()
+  %call7 = call signext i8 @get_small_char()
+  %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+  store i8 %call7, i8* %arrayidx8, align 1
+  call void @end_small_char()
+  %call9 = call signext i8 @get_large_char()
+  %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+  store i8 %call9, i8* %arrayidx10, align 1
+  call void @end_large_char()
+  %call11 = call signext i8 @get_struct_large_char()
+  %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+  %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+  store i8 %call11, i8* %arrayidx12, align 1
+  call void @end_struct_large_char()
+  %call13 = call signext i8 @get_struct_small_char()
+  %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+  %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+  store i8 %call13, i8* %arrayidx15, align 1
+  call void @end_struct_small_char()
+  %call16 = call i32 @get_struct_large_nonchar()
+  %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+  %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+  store i32 %call16, i32* %arrayidx18, align 4
+  call void @end_struct_large_nonchar()
+  %call19 = call signext i16 @get_struct_small_nonchar()
+  %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+  %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+  store i16 %call19, i16* %arrayidx21, align 2
+  call void @end_struct_small_nonchar()
+  %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+  %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+  %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+  %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+  %0 = load i32* %x, align 4
+  %1 = load i32* %y, align 4
+  %2 = load i32* %z, align 4
+  %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+  %3 = bitcast [8 x i8]* %coerce.dive to i64*
+  %4 = load i64* %3, align 1
+  %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+  %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+  %6 = load i16* %5, align 1
+  %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+  %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+  %8 = load i32* %7, align 1
+  call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+  ret void
+}
+
+define void @layout_sspreq() nounwind uwtable sspreq {
+entry:
+; Expected stack layout for sspreq is the same as sspstrong
+;
+; CHECK: layout_sspreq:
+; CHECK: call{{l|q}} get_scalar1
+; CHECK: movl %eax, -120(
+; CHECK: call{{l|q}} end_scalar1
+
+; CHECK: call{{l|q}} get_scalar2
+; CHECK: movl %eax, -124(
+; CHECK: call{{l|q}} end_scalar2
+
+; CHECK: call{{l|q}} get_scalar3
+; CHECK: movl %eax, -128(
+; CHECK: call{{l|q}} end_scalar3
+
+; CHECK: call{{l|q}} get_addrof
+; CHECK: movl %eax, -116(
+; CHECK: call{{l|q}} end_addrof
+
+; CHECK: get_small_nonchar
+; CHECK: movw %ax, -100(
+; CHECK: call{{l|q}} end_small_nonchar
+
+; CHECK: call{{l|q}} get_large_nonchar
+; CHECK: movl %eax, -48(
+; CHECK: call{{l|q}} end_large_nonchar
+
+; CHECK: call{{l|q}} get_small_char
+; CHECK: movb %al, -102(
+; CHECK: call{{l|q}} end_small_char
+
+; CHECK: call{{l|q}} get_large_char
+; CHECK: movb %al, -56(
+; CHECK: call{{l|q}} end_large_char
+
+; CHECK: call{{l|q}} get_struct_large_char
+; CHECK: movb %al, -64(
+; CHECK: call{{l|q}} end_struct_large_char
+
+; CHECK: call{{l|q}} get_struct_small_char
+; CHECK: movb %al, -104(
+; CHECK: call{{l|q}} end_struct_small_char
+
+; CHECK: call{{l|q}} get_struct_large_nonchar
+; CHECK: movl %eax, -96(
+; CHECK: call{{l|q}} end_struct_large_nonchar
+
+; CHECK: call{{l|q}} get_struct_small_nonchar
+; CHECK: movw %ax, -112(
+; CHECK: call{{l|q}} end_struct_small_nonchar
+  %x = alloca i32, align 4
+  %y = alloca i32, align 4
+  %z = alloca i32, align 4
+  %ptr = alloca i32, align 4
+  %small2 = alloca [2 x i16], align 2
+  %large2 = alloca [8 x i32], align 16
+  %small = alloca [2 x i8], align 1
+  %large = alloca [8 x i8], align 1
+  %a = alloca %struct.struct_large_char, align 1
+  %b = alloca %struct.struct_small_char, align 1
+  %c = alloca %struct.struct_large_nonchar, align 8
+  %d = alloca %struct.struct_small_nonchar, align 2
+  %call = call i32 @get_scalar1()
+  store i32 %call, i32* %x, align 4
+  call void @end_scalar1()
+  %call1 = call i32 @get_scalar2()
+  store i32 %call1, i32* %y, align 4
+  call void @end_scalar2()
+  %call2 = call i32 @get_scalar3()
+  store i32 %call2, i32* %z, align 4
+  call void @end_scalar3()
+  %call3 = call i32 @get_addrof()
+  store i32 %call3, i32* %ptr, align 4
+  call void @end_addrof()
+  %call4 = call signext i16 @get_small_nonchar()
+  %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+  store i16 %call4, i16* %arrayidx, align 2
+  call void @end_small_nonchar()
+  %call5 = call i32 @get_large_nonchar()
+  %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+  store i32 %call5, i32* %arrayidx6, align 4
+  call void @end_large_nonchar()
+  %call7 = call signext i8 @get_small_char()
+  %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+  store i8 %call7, i8* %arrayidx8, align 1
+  call void @end_small_char()
+  %call9 = call signext i8 @get_large_char()
+  %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+  store i8 %call9, i8* %arrayidx10, align 1
+  call void @end_large_char()
+  %call11 = call signext i8 @get_struct_large_char()
+  %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
+  %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+  store i8 %call11, i8* %arrayidx12, align 1
+  call void @end_struct_large_char()
+  %call13 = call signext i8 @get_struct_small_char()
+  %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
+  %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+  store i8 %call13, i8* %arrayidx15, align 1
+  call void @end_struct_small_char()
+  %call16 = call i32 @get_struct_large_nonchar()
+  %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
+  %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+  store i32 %call16, i32* %arrayidx18, align 4
+  call void @end_struct_large_nonchar()
+  %call19 = call signext i16 @get_struct_small_nonchar()
+  %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
+  %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+  store i16 %call19, i16* %arrayidx21, align 2
+  call void @end_struct_small_nonchar()
+  %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+  %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
+  %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
+  %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+  %0 = load i32* %x, align 4
+  %1 = load i32* %y, align 4
+  %2 = load i32* %z, align 4
+  %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+  %3 = bitcast [8 x i8]* %coerce.dive to i64*
+  %4 = load i64* %3, align 1
+  %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+  %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
+  %6 = load i16* %5, align 1
+  %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+  %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
+  %8 = load i32* %7, align 1
+  call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
+  ret void
+}
+
 define void @fast_non_linux() ssp {
 entry:
 ; FAST-NON-LIN: fast_non_linux:
@@ -222,9 +497,6 @@ declare void @end_large_char()
 declare signext i8 @get_struct_large_char()
 declare void @end_struct_large_char()
 
-declare signext i8 @get_struct_large_char2()
-declare void @end_struct_large_char2()
-
 declare signext i8 @get_struct_small_char()
 declare void @end_struct_small_char()
 
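Note: the RUN lines that drive the CHECK and FAST-NON-LIN prefixes sit at the top of the test file and are outside the hunks shown above. As an assumption about their typical shape (the exact flags, CPU, and triple used by this test are not visible in this diff), a CodeGen test like this is normally executed by piping the IR through llc for an x86-64 Linux triple and matching the output with FileCheck, roughly:

; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -o - | FileCheck %s

with an additional RUN line selecting the FAST-NON-LIN checks via FileCheck's -check-prefix option for the fast-isel/non-Linux configuration.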

