author     Anthony G. Basile <blueness@gentoo.org>   2012-08-28 23:49:37 -0400
committer  Anthony G. Basile <blueness@gentoo.org>   2012-08-28 23:49:37 -0400
commit     faf75b3fcbabeaab23af0a979389878c0f945e36 (patch)
tree       c2d31c721129b18212111fb0b6196c3aad9d699a
parent     Grsec/PaX: 2.9.1-{2.6.32.59,3.2.28,3.5.2}-201208241943 (diff)
download   hardened-patchset-faf75b3fcbabeaab23af0a979389878c0f945e36.tar.gz
           hardened-patchset-faf75b3fcbabeaab23af0a979389878c0f945e36.tar.bz2
           hardened-patchset-faf75b3fcbabeaab23af0a979389878c0f945e36.zip
Grsec/PaX: 2.9.1-{2.6.32.59,3.2.28,3.5.2}-201208271906
-rw-r--r--  2.6.32/0000_README                                                                                                              |   2
-rw-r--r--  2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208271903.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208232048.patch)  | 380
-rw-r--r--  3.2.28/0000_README                                                                                                              |   2
-rw-r--r--  3.2.28/4420_grsecurity-2.9.1-3.2.28-201208271905.patch (renamed from 3.2.28/4420_grsecurity-2.9.1-3.2.28-201208232048.patch)        | 419
-rw-r--r--  3.5.2/0000_README                                                                                                               |   2
-rw-r--r--  3.5.2/4420_grsecurity-2.9.1-3.5.3-201208271906.patch (renamed from 3.5.2/4420_grsecurity-2.9.1-3.5.2-201208241943.patch)            | 175
6 files changed, 600 insertions, 380 deletions
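
The common change carried by each respun patch is the ppc64_personality() fix visible in the arch/powerpc/kernel/syscalls.c hunks below: instead of overwriting the whole personality word, only the PER_MASK bits are translated, so the caller's flag bits (e.g. ADDR_NO_RANDOMIZE) survive the PER_LINUX/PER_LINUX32 round-trip. A minimal standalone sketch of that masking logic, assuming the personality() macro and the PER_MASK/PER_LINUX/PER_LINUX32 constants from <linux/personality.h>; the helper name is illustrative and not part of the patch:

    /* Illustrative sketch only -- mirrors the masking done in the
     * ppc64_personality() hunks; fixup_personality() is a made-up name. */
    #include <linux/personality.h>

    static unsigned long fixup_personality(unsigned long cur, unsigned long requested)
    {
            /* Translate only the base personality; keep the flag bits intact. */
            if (personality(cur) == PER_LINUX32 &&
                personality(requested) == PER_LINUX)
                    requested = (requested & ~PER_MASK) | PER_LINUX32;
            return requested;
    }
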
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 9c19fa1..16680e5 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -30,7 +30,7 @@ Patch: 1058_linux-2.6.32.59.patch
From: http://www.kernel.org
Desc: Linux 2.6.32.59
-Patch: 4420_grsecurity-2.9.1-2.6.32.59-201208232048.patch
+Patch: 4420_grsecurity-2.9.1-2.6.32.59-201208271903.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208232048.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208271903.patch
index da02455..63a8206 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208232048.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201208271903.patch
@@ -4802,6 +4802,26 @@ index b97c2d6..dd01a6a 100644
}
return error;
}
+diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
+index 3370e62..527c659 100644
+--- a/arch/powerpc/kernel/syscalls.c
++++ b/arch/powerpc/kernel/syscalls.c
+@@ -201,11 +201,11 @@ long ppc64_personality(unsigned long personality)
+ long ret;
+
+ if (personality(current->personality) == PER_LINUX32
+- && personality == PER_LINUX)
+- personality = PER_LINUX32;
++ && personality(personality) == PER_LINUX)
++ personality = (personality & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret = (ret & ~PER_MASK) | PER_LINUX;
+ return ret;
+ }
+ #endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6f0ae1a..e4b6a56 100644
--- a/arch/powerpc/kernel/traps.c
@@ -9657,7 +9677,7 @@ index 588a7aa..a3468b0 100644
if (err)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 4edd8eb..29124b4 100644
+index 4edd8eb..273579e 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -13,7 +13,9 @@
@@ -9716,7 +9736,7 @@ index 4edd8eb..29124b4 100644
movl %ebp,%ebp /* zero extension */
pushq $__USER32_DS
CFI_ADJUST_CFA_OFFSET 8
-@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
+@@ -135,28 +157,47 @@ ENTRY(ia32_sysenter_target)
pushfq
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET rflags,0*/
@@ -9739,6 +9759,11 @@ index 4edd8eb..29124b4 100644
cld
SAVE_ARGS 0,0,1
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs, here we enable it straight after entry:
@@ -9765,7 +9790,7 @@ index 4edd8eb..29124b4 100644
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -166,13 +202,15 @@ sysenter_do_call:
+@@ -166,13 +207,15 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -9784,7 +9809,7 @@ index 4edd8eb..29124b4 100644
/* clear IF, that popfq doesn't enable interrupts early */
andl $~0x200,EFLAGS-R11(%rsp)
movl RIP-R11(%rsp),%edx /* User %eip */
-@@ -200,6 +238,9 @@ sysexit_from_sys_call:
+@@ -200,6 +243,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
@@ -9794,7 +9819,7 @@ index 4edd8eb..29124b4 100644
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -211,7 +252,7 @@ sysexit_from_sys_call:
+@@ -211,7 +257,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
@@ -9803,7 +9828,7 @@ index 4edd8eb..29124b4 100644
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
sti
-@@ -221,12 +262,12 @@ sysexit_from_sys_call:
+@@ -221,12 +267,12 @@ sysexit_from_sys_call:
movzbl %al,%edi /* zero-extend that into %edi */
inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
call audit_syscall_exit
@@ -9818,7 +9843,7 @@ index 4edd8eb..29124b4 100644
jz \exit
CLEAR_RREGS -ARGOFFSET
jmp int_with_check
-@@ -244,7 +285,7 @@ sysexit_audit:
+@@ -244,7 +290,7 @@ sysexit_audit:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -9827,17 +9852,17 @@ index 4edd8eb..29124b4 100644
jz sysenter_auditsys
#endif
SAVE_REST
-@@ -252,6 +293,9 @@ sysenter_tracesys:
- movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
+@@ -256,6 +302,9 @@ sysenter_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
- LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
-@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
+ jmp sysenter_do_call
+ CFI_ENDPROC
+ ENDPROC(ia32_sysenter_target)
+@@ -283,19 +332,25 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
@@ -9851,6 +9876,11 @@ index 4edd8eb..29124b4 100644
movq PER_CPU_VAR(kernel_stack),%rsp
+ SAVE_ARGS 8*6,1,1
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
@@ -9860,7 +9890,7 @@ index 4edd8eb..29124b4 100644
movl %eax,%eax /* zero extension */
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
+@@ -311,13 +366,19 @@ ENTRY(ia32_cstar_target)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
@@ -9883,7 +9913,7 @@ index 4edd8eb..29124b4 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -327,13 +378,15 @@ cstar_do_call:
+@@ -327,13 +388,15 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -9902,7 +9932,7 @@ index 4edd8eb..29124b4 100644
RESTORE_ARGS 1,-ARG_SKIP,1,1,1
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
-@@ -361,7 +414,7 @@ sysretl_audit:
+@@ -361,7 +424,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -9911,17 +9941,17 @@ index 4edd8eb..29124b4 100644
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
-@@ -370,6 +423,9 @@ cstar_tracesys:
- movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
+@@ -375,6 +438,9 @@ cstar_tracesys:
+ xchgl %ebp,%r9d
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
- LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- xchgl %ebp,%r9d
-@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
+ jmp cstar_do_call
+ END(ia32_cstar_target)
+
+@@ -415,11 +481,6 @@ ENTRY(ia32_syscall)
CFI_REL_OFFSET rip,RIP-RIP
PARAVIRT_ADJUST_EXCEPTION_FRAME
SWAPGS
@@ -9933,7 +9963,7 @@ index 4edd8eb..29124b4 100644
movl %eax,%eax
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
-@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
+@@ -427,9 +488,20 @@ ENTRY(ia32_syscall)
/* note the registers are not zero extended to the sf.
this could be a problem. */
SAVE_ARGS 0,0,1
@@ -9941,6 +9971,11 @@ index 4edd8eb..29124b4 100644
- orl $TS_COMPAT,TI_status(%r10)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
@@ -9952,17 +9987,17 @@ index 4edd8eb..29124b4 100644
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -448,6 +505,9 @@ ia32_tracesys:
- movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
+@@ -452,6 +524,9 @@ ia32_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
- LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
-@@ -462,6 +522,7 @@ ia32_badsys:
+ jmp ia32_do_call
+ END(ia32_syscall)
+
+@@ -462,6 +537,7 @@ ia32_badsys:
quiet_ni_syscall:
movq $-ENOSYS,%rax
@@ -17126,7 +17161,7 @@ index 4c07cca..2c8427d 100644
ret
ENDPROC(efi_call6)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index c097e7d..853746c 100644
+index c097e7d..a3f1930 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -95,12 +95,6 @@
@@ -17142,7 +17177,7 @@ index c097e7d..853746c 100644
/*
* User gs save/restore
*
-@@ -185,13 +179,146 @@
+@@ -185,13 +179,153 @@
/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
@@ -17246,10 +17281,10 @@ index c097e7d..853746c 100644
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+/*
+ * ebp: thread_info
-+ * ecx, edx: can be clobbered
+ */
+ENTRY(pax_erase_kstack)
+ pushl %edi
++ pushl %ecx
+ pushl %eax
+
+ mov TI_lowest_stack(%ebp), %edi
@@ -17273,6 +17308,12 @@ index c097e7d..853746c 100644
+2: cld
+ mov %esp, %ecx
+ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %ecx
++ jb 3f
++ ud2
++3:
++
+ shr $2, %ecx
+ rep stosl
+
@@ -17281,6 +17322,7 @@ index c097e7d..853746c 100644
+ mov %edi, TI_lowest_stack(%ebp)
+
+ popl %eax
++ popl %ecx
+ popl %edi
+ ret
+ENDPROC(pax_erase_kstack)
@@ -17290,7 +17332,7 @@ index c097e7d..853746c 100644
cld
PUSH_GS
pushl %fs
-@@ -224,7 +351,7 @@
+@@ -224,7 +358,7 @@
pushl %ebx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebx, 0
@@ -17299,7 +17341,7 @@ index c097e7d..853746c 100644
movl %edx, %ds
movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
-@@ -232,6 +359,15 @@
+@@ -232,6 +366,15 @@
SET_KERNEL_GS %edx
.endm
@@ -17315,7 +17357,7 @@ index c097e7d..853746c 100644
.macro RESTORE_INT_REGS
popl %ebx
CFI_ADJUST_CFA_OFFSET -4
-@@ -331,7 +467,7 @@ ENTRY(ret_from_fork)
+@@ -331,7 +474,7 @@ ENTRY(ret_from_fork)
CFI_ADJUST_CFA_OFFSET -4
jmp syscall_exit
CFI_ENDPROC
@@ -17324,7 +17366,7 @@ index c097e7d..853746c 100644
/*
* Return to user mode is not as complex as all this looks,
-@@ -347,12 +483,29 @@ ret_from_exception:
+@@ -347,12 +490,29 @@ ret_from_exception:
preempt_stop(CLBR_ANY)
ret_from_intr:
GET_THREAD_INFO(%ebp)
@@ -17355,7 +17397,7 @@ index c097e7d..853746c 100644
ENTRY(resume_userspace)
LOCKDEP_SYS_EXIT
-@@ -364,8 +517,8 @@ ENTRY(resume_userspace)
+@@ -364,8 +524,8 @@ ENTRY(resume_userspace)
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
@@ -17366,7 +17408,7 @@ index c097e7d..853746c 100644
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
-@@ -380,7 +533,7 @@ need_resched:
+@@ -380,7 +540,7 @@ need_resched:
jz restore_all
call preempt_schedule_irq
jmp need_resched
@@ -17375,7 +17417,7 @@ index c097e7d..853746c 100644
#endif
CFI_ENDPROC
-@@ -414,25 +567,36 @@ sysenter_past_esp:
+@@ -414,25 +574,36 @@ sysenter_past_esp:
/*CFI_REL_OFFSET cs, 0*/
/*
* Push current_thread_info()->sysenter_return to the stack.
@@ -17415,7 +17457,18 @@ index c097e7d..853746c 100644
movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
.align 4
-@@ -455,12 +619,24 @@ sysenter_do_call:
+@@ -441,6 +612,10 @@ sysenter_past_esp:
+
+ GET_THREAD_INFO(%ebp)
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz sysenter_audit
+ sysenter_do_call:
+@@ -455,12 +630,24 @@ sysenter_do_call:
testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit
sysenter_exit:
@@ -17440,7 +17493,7 @@ index c097e7d..853746c 100644
PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT
-@@ -477,6 +653,9 @@ sysenter_audit:
+@@ -477,6 +664,9 @@ sysenter_audit:
movl %eax,%edx /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
call audit_syscall_entry
@@ -17450,7 +17503,7 @@ index c097e7d..853746c 100644
pushl %ebx
CFI_ADJUST_CFA_OFFSET 4
movl PT_EAX(%esp),%eax /* reload syscall number */
-@@ -504,11 +683,17 @@ sysexit_audit:
+@@ -504,11 +694,17 @@ sysexit_audit:
CFI_ENDPROC
.pushsection .fixup,"ax"
@@ -17470,7 +17523,19 @@ index c097e7d..853746c 100644
.popsection
PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
-@@ -538,6 +723,15 @@ syscall_exit:
+@@ -520,6 +716,11 @@ ENTRY(system_call)
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ # system call tracing in operation / emulation
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz syscall_trace_entry
+@@ -538,6 +739,15 @@ syscall_exit:
testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work
@@ -17486,7 +17551,7 @@ index c097e7d..853746c 100644
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
-@@ -602,10 +796,29 @@ ldt_ss:
+@@ -602,10 +812,29 @@ ldt_ss:
mov PT_OLDESP(%esp), %eax /* load userspace esp */
mov %dx, %ax /* eax: new kernel esp */
sub %eax, %edx /* offset (low word is 0) */
@@ -17517,7 +17582,7 @@ index c097e7d..853746c 100644
pushl $__ESPFIX_SS
CFI_ADJUST_CFA_OFFSET 4
push %eax /* new kernel esp */
-@@ -636,36 +849,30 @@ work_resched:
+@@ -636,36 +865,30 @@ work_resched:
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
@@ -17559,7 +17624,7 @@ index c097e7d..853746c 100644
# perform syscall exit tracing
ALIGN
-@@ -673,11 +880,14 @@ syscall_trace_entry:
+@@ -673,11 +896,14 @@ syscall_trace_entry:
movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
call syscall_trace_enter
@@ -17575,7 +17640,7 @@ index c097e7d..853746c 100644
# perform syscall exit tracing
ALIGN
-@@ -690,20 +900,24 @@ syscall_exit_work:
+@@ -690,20 +916,24 @@ syscall_exit_work:
movl %esp, %eax
call syscall_trace_leave
jmp resume_userspace
@@ -17603,7 +17668,7 @@ index c097e7d..853746c 100644
CFI_ENDPROC
/*
-@@ -726,6 +940,33 @@ PTREGSCALL(rt_sigreturn)
+@@ -726,6 +956,33 @@ PTREGSCALL(rt_sigreturn)
PTREGSCALL(vm86)
PTREGSCALL(vm86old)
@@ -17637,7 +17702,7 @@ index c097e7d..853746c 100644
.macro FIXUP_ESPFIX_STACK
/*
* Switch back for ESPFIX stack to the normal zerobased stack
-@@ -735,7 +976,13 @@ PTREGSCALL(vm86old)
+@@ -735,7 +992,13 @@ PTREGSCALL(vm86old)
* normal stack and adjusts ESP with the matching offset.
*/
/* fixup the stack */
@@ -17652,7 +17717,7 @@ index c097e7d..853746c 100644
mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
shl $16, %eax
-@@ -793,7 +1040,7 @@ vector=vector+1
+@@ -793,7 +1056,7 @@ vector=vector+1
.endr
2: jmp common_interrupt
.endr
@@ -17661,7 +17726,7 @@ index c097e7d..853746c 100644
.previous
END(interrupt)
-@@ -840,7 +1087,7 @@ ENTRY(coprocessor_error)
+@@ -840,7 +1103,7 @@ ENTRY(coprocessor_error)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17670,7 +17735,7 @@ index c097e7d..853746c 100644
ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
-@@ -850,7 +1097,7 @@ ENTRY(simd_coprocessor_error)
+@@ -850,7 +1113,7 @@ ENTRY(simd_coprocessor_error)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17679,7 +17744,7 @@ index c097e7d..853746c 100644
ENTRY(device_not_available)
RING0_INT_FRAME
-@@ -860,7 +1107,7 @@ ENTRY(device_not_available)
+@@ -860,7 +1123,7 @@ ENTRY(device_not_available)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17688,7 +17753,7 @@ index c097e7d..853746c 100644
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
-@@ -869,12 +1116,12 @@ ENTRY(native_iret)
+@@ -869,12 +1132,12 @@ ENTRY(native_iret)
.align 4
.long native_iret, iret_exc
.previous
@@ -17703,7 +17768,7 @@ index c097e7d..853746c 100644
#endif
ENTRY(overflow)
-@@ -885,7 +1132,7 @@ ENTRY(overflow)
+@@ -885,7 +1148,7 @@ ENTRY(overflow)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17712,7 +17777,7 @@ index c097e7d..853746c 100644
ENTRY(bounds)
RING0_INT_FRAME
-@@ -895,7 +1142,7 @@ ENTRY(bounds)
+@@ -895,7 +1158,7 @@ ENTRY(bounds)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17721,7 +17786,7 @@ index c097e7d..853746c 100644
ENTRY(invalid_op)
RING0_INT_FRAME
-@@ -905,7 +1152,7 @@ ENTRY(invalid_op)
+@@ -905,7 +1168,7 @@ ENTRY(invalid_op)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17730,7 +17795,7 @@ index c097e7d..853746c 100644
ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
-@@ -915,7 +1162,7 @@ ENTRY(coprocessor_segment_overrun)
+@@ -915,7 +1178,7 @@ ENTRY(coprocessor_segment_overrun)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17739,7 +17804,7 @@ index c097e7d..853746c 100644
ENTRY(invalid_TSS)
RING0_EC_FRAME
-@@ -923,7 +1170,7 @@ ENTRY(invalid_TSS)
+@@ -923,7 +1186,7 @@ ENTRY(invalid_TSS)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17748,7 +17813,7 @@ index c097e7d..853746c 100644
ENTRY(segment_not_present)
RING0_EC_FRAME
-@@ -931,7 +1178,7 @@ ENTRY(segment_not_present)
+@@ -931,7 +1194,7 @@ ENTRY(segment_not_present)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17757,7 +17822,7 @@ index c097e7d..853746c 100644
ENTRY(stack_segment)
RING0_EC_FRAME
-@@ -939,7 +1186,7 @@ ENTRY(stack_segment)
+@@ -939,7 +1202,7 @@ ENTRY(stack_segment)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17766,7 +17831,7 @@ index c097e7d..853746c 100644
ENTRY(alignment_check)
RING0_EC_FRAME
-@@ -947,7 +1194,7 @@ ENTRY(alignment_check)
+@@ -947,7 +1210,7 @@ ENTRY(alignment_check)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17775,7 +17840,7 @@ index c097e7d..853746c 100644
ENTRY(divide_error)
RING0_INT_FRAME
-@@ -957,7 +1204,7 @@ ENTRY(divide_error)
+@@ -957,7 +1220,7 @@ ENTRY(divide_error)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17784,7 +17849,7 @@ index c097e7d..853746c 100644
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
-@@ -968,7 +1215,7 @@ ENTRY(machine_check)
+@@ -968,7 +1231,7 @@ ENTRY(machine_check)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17793,7 +17858,7 @@ index c097e7d..853746c 100644
#endif
ENTRY(spurious_interrupt_bug)
-@@ -979,7 +1226,7 @@ ENTRY(spurious_interrupt_bug)
+@@ -979,7 +1242,7 @@ ENTRY(spurious_interrupt_bug)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17802,7 +17867,7 @@ index c097e7d..853746c 100644
ENTRY(kernel_thread_helper)
pushl $0 # fake return address for unwinder
-@@ -1095,7 +1342,7 @@ ENDPROC(xen_failsafe_callback)
+@@ -1095,7 +1358,7 @@ ENDPROC(xen_failsafe_callback)
ENTRY(mcount)
ret
@@ -17811,7 +17876,7 @@ index c097e7d..853746c 100644
ENTRY(ftrace_caller)
cmpl $0, function_trace_stop
-@@ -1124,7 +1371,7 @@ ftrace_graph_call:
+@@ -1124,7 +1387,7 @@ ftrace_graph_call:
.globl ftrace_stub
ftrace_stub:
ret
@@ -17820,7 +17885,7 @@ index c097e7d..853746c 100644
#else /* ! CONFIG_DYNAMIC_FTRACE */
-@@ -1160,7 +1407,7 @@ trace:
+@@ -1160,7 +1423,7 @@ trace:
popl %ecx
popl %eax
jmp ftrace_stub
@@ -17829,7 +17894,7 @@ index c097e7d..853746c 100644
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
-@@ -1181,7 +1428,7 @@ ENTRY(ftrace_graph_caller)
+@@ -1181,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
popl %ecx
popl %eax
ret
@@ -17838,7 +17903,7 @@ index c097e7d..853746c 100644
.globl return_to_handler
return_to_handler:
-@@ -1198,7 +1445,6 @@ return_to_handler:
+@@ -1198,7 +1461,6 @@ return_to_handler:
ret
#endif
@@ -17846,7 +17911,7 @@ index c097e7d..853746c 100644
#include "syscall_table_32.S"
syscall_table_size=(.-sys_call_table)
-@@ -1255,15 +1501,18 @@ error_code:
+@@ -1255,15 +1517,18 @@ error_code:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
@@ -17867,7 +17932,7 @@ index c097e7d..853746c 100644
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
-@@ -1309,7 +1558,7 @@ debug_stack_correct:
+@@ -1309,7 +1574,7 @@ debug_stack_correct:
call do_debug
jmp ret_from_exception
CFI_ENDPROC
@@ -17876,7 +17941,7 @@ index c097e7d..853746c 100644
/*
* NMI is doubly nasty. It can happen _while_ we're handling
-@@ -1351,6 +1600,9 @@ nmi_stack_correct:
+@@ -1351,6 +1616,9 @@ nmi_stack_correct:
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
@@ -17886,7 +17951,7 @@ index c097e7d..853746c 100644
jmp restore_all_notrace
CFI_ENDPROC
-@@ -1391,12 +1643,15 @@ nmi_espfix_stack:
+@@ -1391,12 +1659,15 @@ nmi_espfix_stack:
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
@@ -17903,7 +17968,7 @@ index c097e7d..853746c 100644
ENTRY(int3)
RING0_INT_FRAME
-@@ -1409,7 +1664,7 @@ ENTRY(int3)
+@@ -1409,7 +1680,7 @@ ENTRY(int3)
call do_int3
jmp ret_from_exception
CFI_ENDPROC
@@ -17912,7 +17977,7 @@ index c097e7d..853746c 100644
ENTRY(general_protection)
RING0_EC_FRAME
-@@ -1417,7 +1672,7 @@ ENTRY(general_protection)
+@@ -1417,7 +1688,7 @@ ENTRY(general_protection)
CFI_ADJUST_CFA_OFFSET 4
jmp error_code
CFI_ENDPROC
@@ -17922,7 +17987,7 @@ index c097e7d..853746c 100644
/*
* End of kprobes section
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 34a56a9..74613c5 100644
+index 34a56a9..0d13843 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -53,6 +53,8 @@
@@ -17998,7 +18063,7 @@ index 34a56a9..74613c5 100644
retq
#endif
-@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
+@@ -174,6 +182,280 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
@@ -18227,12 +18292,9 @@ index 34a56a9..74613c5 100644
+.endm
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+/*
-+ * r11: thread_info
-+ * rcx, rdx: can be clobbered
-+ */
+ENTRY(pax_erase_kstack)
+ pushq %rdi
++ pushq %rcx
+ pushq %rax
+ pushq %r11
+
@@ -18273,6 +18335,7 @@ index 34a56a9..74613c5 100644
+
+ popq %r11
+ popq %rax
++ popq %rcx
+ popq %rdi
+ pax_force_retaddr
+ ret
@@ -18281,7 +18344,7 @@ index 34a56a9..74613c5 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
-@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
+@@ -233,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
.endm
.macro UNFAKE_STACK_FRAME
@@ -18292,7 +18355,7 @@ index 34a56a9..74613c5 100644
.endm
/*
-@@ -317,7 +601,7 @@ ENTRY(save_args)
+@@ -317,7 +599,7 @@ ENTRY(save_args)
leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
movq_cfi rbp, 8 /* push %rbp */
leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
@@ -18301,7 +18364,7 @@ index 34a56a9..74613c5 100644
je 1f
SWAPGS
/*
-@@ -337,9 +621,10 @@ ENTRY(save_args)
+@@ -337,9 +619,10 @@ ENTRY(save_args)
* We entered an interrupt context - irqs are off:
*/
2: TRACE_IRQS_OFF
@@ -18313,7 +18376,7 @@ index 34a56a9..74613c5 100644
ENTRY(save_rest)
PARTIAL_FRAME 1 REST_SKIP+8
-@@ -352,9 +637,10 @@ ENTRY(save_rest)
+@@ -352,9 +635,10 @@ ENTRY(save_rest)
movq_cfi r15, R15+16
movq %r11, 8(%rsp) /* return address */
FIXUP_TOP_OF_STACK %r11, 16
@@ -18325,7 +18388,7 @@ index 34a56a9..74613c5 100644
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
-@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
+@@ -383,9 +667,10 @@ ENTRY(save_paranoid)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
@@ -18338,7 +18401,7 @@ index 34a56a9..74613c5 100644
.popsection
/*
-@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
+@@ -409,7 +694,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -18347,7 +18410,7 @@ index 34a56a9..74613c5 100644
je int_ret_from_sys_call
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
+@@ -419,7 +704,7 @@ ENTRY(ret_from_fork)
jmp ret_from_sys_call # go to the SYSRET fastpath
CFI_ENDPROC
@@ -18356,7 +18419,7 @@ index 34a56a9..74613c5 100644
/*
* System call entry. Upto 6 arguments in registers are supported.
-@@ -455,7 +742,7 @@ END(ret_from_fork)
+@@ -455,7 +740,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
@@ -18365,12 +18428,17 @@ index 34a56a9..74613c5 100644
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
-@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
+@@ -468,12 +753,18 @@ ENTRY(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
+ SAVE_ARGS 8*6,1
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
/*
* No need to follow this irqs off/on section - it's straight
* and short:
@@ -18380,7 +18448,7 @@ index 34a56a9..74613c5 100644
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
-@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
+@@ -483,7 +774,7 @@ ENTRY(system_call_after_swapgs)
system_call_fastpath:
cmpq $__NR_syscall_max,%rax
ja badsys
@@ -18389,7 +18457,7 @@ index 34a56a9..74613c5 100644
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
/*
-@@ -502,6 +790,8 @@ sysret_check:
+@@ -502,6 +793,8 @@ sysret_check:
andl %edi,%edx
jnz sysret_careful
CFI_REMEMBER_STATE
@@ -18398,7 +18466,7 @@ index 34a56a9..74613c5 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -555,14 +845,18 @@ badsys:
+@@ -555,14 +848,18 @@ badsys:
* jump back to the normal fast path.
*/
auditsys:
@@ -18418,7 +18486,7 @@ index 34a56a9..74613c5 100644
jmp system_call_fastpath
/*
-@@ -592,16 +886,20 @@ tracesys:
+@@ -592,16 +889,20 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
@@ -18440,7 +18508,7 @@ index 34a56a9..74613c5 100644
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
-@@ -613,7 +911,7 @@ tracesys:
+@@ -613,7 +914,7 @@ tracesys:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -18449,15 +18517,18 @@ index 34a56a9..74613c5 100644
je retint_restore_args
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
-@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
+@@ -624,7 +925,9 @@ GLOBAL(int_with_check)
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
+- jmp retint_swapgs
++ pax_exit_kernel_user
+ pax_erase_kstack
- jmp retint_swapgs
++ jmp retint_swapgs_pax
/* Either reschedule or signal or syscall exit tracking needed. */
-@@ -674,7 +973,7 @@ int_restore_rest:
+ /* First do a reschedule test. */
+@@ -674,7 +977,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
@@ -18466,7 +18537,7 @@ index 34a56a9..74613c5 100644
/*
* Certain special system calls that need to save a complete full stack frame.
-@@ -690,7 +989,7 @@ ENTRY(\label)
+@@ -690,7 +993,7 @@ ENTRY(\label)
call \func
jmp ptregscall_common
CFI_ENDPROC
@@ -18475,7 +18546,7 @@ index 34a56a9..74613c5 100644
.endm
PTREGSCALL stub_clone, sys_clone, %r8
-@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
+@@ -708,9 +1011,10 @@ ENTRY(ptregscall_common)
movq_cfi_restore R12+8, r12
movq_cfi_restore RBP+8, rbp
movq_cfi_restore RBX+8, rbx
@@ -18487,7 +18558,7 @@ index 34a56a9..74613c5 100644
ENTRY(stub_execve)
CFI_STARTPROC
-@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
+@@ -726,7 +1030,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -18496,7 +18567,7 @@ index 34a56a9..74613c5 100644
/*
* sigreturn is special because it needs to restore all registers on return.
-@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
+@@ -744,7 +1048,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -18505,7 +18576,7 @@ index 34a56a9..74613c5 100644
/*
* Build the entry stubs and pointer table with some assembler magic.
-@@ -780,7 +1080,7 @@ vector=vector+1
+@@ -780,7 +1084,7 @@ vector=vector+1
2: jmp common_interrupt
.endr
CFI_ENDPROC
@@ -18514,7 +18585,7 @@ index 34a56a9..74613c5 100644
.previous
END(interrupt)
-@@ -800,6 +1100,16 @@ END(interrupt)
+@@ -800,6 +1104,16 @@ END(interrupt)
CFI_ADJUST_CFA_OFFSET 10*8
call save_args
PARTIAL_FRAME 0
@@ -18531,7 +18602,7 @@ index 34a56a9..74613c5 100644
call \func
.endm
-@@ -822,7 +1132,7 @@ ret_from_intr:
+@@ -822,7 +1136,7 @@ ret_from_intr:
CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -18540,11 +18611,12 @@ index 34a56a9..74613c5 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
+@@ -844,12 +1158,16 @@ retint_swapgs: /* return to user-space */
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
+ pax_exit_kernel_user
++retint_swapgs_pax:
TRACE_IRQS_IRETQ
SWAPGS
jmp restore_args
@@ -18556,7 +18628,7 @@ index 34a56a9..74613c5 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
+@@ -940,7 +1258,7 @@ ENTRY(retint_kernel)
#endif
CFI_ENDPROC
@@ -18565,7 +18637,7 @@ index 34a56a9..74613c5 100644
/*
* APIC interrupts.
-@@ -953,7 +1266,7 @@ ENTRY(\sym)
+@@ -953,7 +1271,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -18574,7 +18646,7 @@ index 34a56a9..74613c5 100644
.endm
#ifdef CONFIG_SMP
-@@ -1032,12 +1345,22 @@ ENTRY(\sym)
+@@ -1032,12 +1350,22 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET 15*8
call error_entry
DEFAULT_FRAME 0
@@ -18598,7 +18670,7 @@ index 34a56a9..74613c5 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1049,12 +1372,22 @@ ENTRY(\sym)
+@@ -1049,12 +1377,22 @@ ENTRY(\sym)
subq $15*8, %rsp
call save_paranoid
TRACE_IRQS_OFF
@@ -18622,7 +18694,7 @@ index 34a56a9..74613c5 100644
.endm
.macro paranoidzeroentry_ist sym do_sym ist
-@@ -1066,15 +1399,30 @@ ENTRY(\sym)
+@@ -1066,15 +1404,30 @@ ENTRY(\sym)
subq $15*8, %rsp
call save_paranoid
TRACE_IRQS_OFF
@@ -18655,7 +18727,7 @@ index 34a56a9..74613c5 100644
.endm
.macro errorentry sym do_sym
-@@ -1085,13 +1433,23 @@ ENTRY(\sym)
+@@ -1085,13 +1438,23 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET 15*8
call error_entry
DEFAULT_FRAME 0
@@ -18680,7 +18752,7 @@ index 34a56a9..74613c5 100644
.endm
/* error code is on the stack already */
-@@ -1104,13 +1462,23 @@ ENTRY(\sym)
+@@ -1104,13 +1467,23 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -18705,7 +18777,7 @@ index 34a56a9..74613c5 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1141,9 +1509,10 @@ gs_change:
+@@ -1141,9 +1514,10 @@ gs_change:
SWAPGS
popf
CFI_ADJUST_CFA_OFFSET -8
@@ -18717,7 +18789,7 @@ index 34a56a9..74613c5 100644
.section __ex_table,"a"
.align 8
-@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
+@@ -1193,11 +1567,12 @@ ENTRY(kernel_thread)
* of hacks for example to fork off the per-CPU idle tasks.
* [Hopefully no generic code relies on the reschedule -AK]
*/
@@ -18732,7 +18804,7 @@ index 34a56a9..74613c5 100644
ENTRY(child_rip)
pushq $0 # fake return address
-@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
+@@ -1208,13 +1583,14 @@ ENTRY(child_rip)
*/
movq %rdi, %rax
movq %rsi, %rdi
@@ -18748,7 +18820,7 @@ index 34a56a9..74613c5 100644
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
+@@ -1241,11 +1617,11 @@ ENTRY(kernel_execve)
RESTORE_REST
testq %rax,%rax
je int_ret_from_sys_call
@@ -18762,7 +18834,7 @@ index 34a56a9..74613c5 100644
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
-@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
+@@ -1263,9 +1639,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -18774,7 +18846,7 @@ index 34a56a9..74613c5 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1303,7 +1680,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -18783,7 +18855,7 @@ index 34a56a9..74613c5 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1362,7 +1739,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -18792,7 +18864,7 @@ index 34a56a9..74613c5 100644
#endif /* CONFIG_XEN */
-@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
+@@ -1405,16 +1782,31 @@ ENTRY(paranoid_exit)
TRACE_IRQS_OFF
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
@@ -18825,7 +18897,7 @@ index 34a56a9..74613c5 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1443,7 +1830,7 @@ paranoid_schedule:
+@@ -1443,7 +1835,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -18834,7 +18906,7 @@ index 34a56a9..74613c5 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
+@@ -1470,12 +1862,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -18849,7 +18921,7 @@ index 34a56a9..74613c5 100644
ret
CFI_ENDPROC
-@@ -1497,7 +1885,7 @@ error_kernelspace:
+@@ -1497,7 +1890,7 @@ error_kernelspace:
cmpq $gs_change,RIP+8(%rsp)
je error_swapgs
jmp error_sti
@@ -18858,7 +18930,7 @@ index 34a56a9..74613c5 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
+@@ -1517,7 +1910,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -18867,7 +18939,7 @@ index 34a56a9..74613c5 100644
/* runs on exception stack */
-@@ -1529,6 +1917,16 @@ ENTRY(nmi)
+@@ -1529,6 +1922,16 @@ ENTRY(nmi)
CFI_ADJUST_CFA_OFFSET 15*8
call save_paranoid
DEFAULT_FRAME 0
@@ -18884,7 +18956,7 @@ index 34a56a9..74613c5 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1539,12 +1937,28 @@ ENTRY(nmi)
+@@ -1539,12 +1942,28 @@ ENTRY(nmi)
DISABLE_INTERRUPTS(CLBR_NONE)
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
@@ -18914,7 +18986,7 @@ index 34a56a9..74613c5 100644
jmp irq_return
nmi_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1573,14 +1987,14 @@ nmi_schedule:
+@@ -1573,14 +1992,14 @@ nmi_schedule:
jmp paranoid_exit
CFI_ENDPROC
#endif
@@ -68795,7 +68867,7 @@ index 90a6087..fa05803 100644
if (rc < 0)
goto out_free;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
-index f539204..068db1f 100644
+index f539204..b2ad18e 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -200,6 +200,12 @@ struct eventpoll {
@@ -69086,8 +69158,8 @@ index f539204..068db1f 100644
+ error = PTR_ERR(file);
+ goto out_free_fd;
+ }
-+ fd_install(fd, file);
+ ep->file = file;
++ fd_install(fd, file);
+ return fd;
+out_free_fd:
@@ -107553,10 +107625,10 @@ index d52f7a0..b66cdd9 100755
rm -f tags
xtags ctags
diff --git a/security/Kconfig b/security/Kconfig
-index fb363cd..124d914 100644
+index fb363cd..a34a964 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,870 @@
+@@ -4,6 +4,882 @@
menu "Security options"
@@ -108140,6 +108212,10 @@ index fb363cd..124d914 100644
+ Select the method used to instrument function pointer dereferences.
+ Note that binary modules cannot be instrumented by this approach.
+
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
+ config PAX_KERNEXEC_PLUGIN_METHOD_BTS
+ bool "bts"
+ help
@@ -108313,11 +108389,12 @@ index fb363cd..124d914 100644
+ and you are advised to test this feature on your expected workload
+ before deploying it.
+
-+ Note: full support for this feature requires gcc with plugin support
-+ so make sure your compiler is at least gcc 4.5.0. Using older gcc
-+ versions means that functions with large enough stack frames may
-+ leave uninitialized memory behind that may be exposed to a later
-+ syscall leaking the stack.
++ Note that the full feature requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package. Using
++ older gcc versions means that functions with large enough stack
++ frames may leave uninitialized memory behind that may be exposed
++ to a later syscall leaking the stack.
+
+config PAX_MEMORY_UDEREF
+ bool "Prevent invalid userland pointer dereference"
@@ -108395,11 +108472,14 @@ index fb363cd..124d914 100644
+ arguments marked by a size_overflow attribute with double integer
+ precision (DImode/TImode for 32/64 bit integer types).
+
-+ The recomputed argument is checked against INT_MAX and an event
++ The recomputed argument is checked against TYPE_MAX and an event
+ is logged on overflow and the triggering process is killed.
+
-+ Homepage:
-+ http://www.grsecurity.net/~ephox/overflow_plugin/
++ Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
+
+config PAX_LATENT_ENTROPY
+ bool "Generate some entropy during boot"
@@ -108411,6 +108491,10 @@ index fb363cd..124d914 100644
+ there is little 'natural' source of entropy normally. The cost
+ is some slowdown of the boot process.
+
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
+ Note that entropy extracted this way is not cryptographically
+ secure!
+
@@ -108427,7 +108511,7 @@ index fb363cd..124d914 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -146,7 +1010,7 @@ config INTEL_TXT
+@@ -146,7 +1022,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
diff --git a/3.2.28/0000_README b/3.2.28/0000_README
index af762d4..8e8f3c9 100644
--- a/3.2.28/0000_README
+++ b/3.2.28/0000_README
@@ -30,7 +30,7 @@ Patch: 1027_linux-3.2.28.patch
From: http://www.kernel.org
Desc: Linux 3.2.28
-Patch: 4420_grsecurity-2.9.1-3.2.28-201208232048.patch
+Patch: 4420_grsecurity-2.9.1-3.2.28-201208271905.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.28/4420_grsecurity-2.9.1-3.2.28-201208232048.patch b/3.2.28/4420_grsecurity-2.9.1-3.2.28-201208271905.patch
index 3457f14..11d1b8e 100644
--- a/3.2.28/4420_grsecurity-2.9.1-3.2.28-201208232048.patch
+++ b/3.2.28/4420_grsecurity-2.9.1-3.2.28-201208271905.patch
@@ -4435,6 +4435,26 @@ index a50b5ec..547078a 100644
regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
+index f2496f2..4e3cc47 100644
+--- a/arch/powerpc/kernel/syscalls.c
++++ b/arch/powerpc/kernel/syscalls.c
+@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
+ long ret;
+
+ if (personality(current->personality) == PER_LINUX32
+- && personality == PER_LINUX)
+- personality = PER_LINUX32;
++ && personality(personality) == PER_LINUX)
++ personality = (personality & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret = (ret & ~PER_MASK) | PER_LINUX;
+ return ret;
+ }
+ #endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 5459d14..10f8070 100644
--- a/arch/powerpc/kernel/traps.c
@@ -8730,7 +8750,7 @@ index 6557769..ef6ae89 100644
if (err)
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index a6253ec..4ad2120 100644
+index a6253ec..0a325de 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -13,7 +13,9 @@
@@ -8789,7 +8809,7 @@ index a6253ec..4ad2120 100644
movl %ebp,%ebp /* zero extension */
pushq_cfi $__USER32_DS
/*CFI_REL_OFFSET ss,0*/
-@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
+@@ -134,25 +156,44 @@ ENTRY(ia32_sysenter_target)
CFI_REL_OFFSET rsp,0
pushfq_cfi
/*CFI_REL_OFFSET rflags,0*/
@@ -8809,6 +8829,11 @@ index a6253ec..4ad2120 100644
cld
SAVE_ARGS 0,1,0
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs, here we enable it straight after entry:
@@ -8835,7 +8860,7 @@ index a6253ec..4ad2120 100644
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -162,13 +198,15 @@ sysenter_do_call:
+@@ -162,13 +203,15 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -8854,7 +8879,7 @@ index a6253ec..4ad2120 100644
/* clear IF, that popfq doesn't enable interrupts early */
andl $~0x200,EFLAGS-R11(%rsp)
movl RIP-R11(%rsp),%edx /* User %eip */
-@@ -194,6 +232,9 @@ sysexit_from_sys_call:
+@@ -194,6 +237,9 @@ sysexit_from_sys_call:
movl %eax,%esi /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
@@ -8864,7 +8889,7 @@ index a6253ec..4ad2120 100644
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -205,7 +246,7 @@ sysexit_from_sys_call:
+@@ -205,7 +251,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
@@ -8873,7 +8898,7 @@ index a6253ec..4ad2120 100644
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
sti
-@@ -215,12 +256,12 @@ sysexit_from_sys_call:
+@@ -215,12 +261,12 @@ sysexit_from_sys_call:
movzbl %al,%edi /* zero-extend that into %edi */
inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
call audit_syscall_exit
@@ -8888,7 +8913,7 @@ index a6253ec..4ad2120 100644
jz \exit
CLEAR_RREGS -ARGOFFSET
jmp int_with_check
-@@ -238,7 +279,7 @@ sysexit_audit:
+@@ -238,7 +284,7 @@ sysexit_audit:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -8897,17 +8922,17 @@ index a6253ec..4ad2120 100644
jz sysenter_auditsys
#endif
SAVE_REST
-@@ -246,6 +287,9 @@ sysenter_tracesys:
- movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
+@@ -250,6 +296,9 @@ sysenter_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
- LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
-@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
+ jmp sysenter_do_call
+ CFI_ENDPROC
+ ENDPROC(ia32_sysenter_target)
+@@ -277,19 +326,25 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
@@ -8921,6 +8946,11 @@ index a6253ec..4ad2120 100644
movq PER_CPU_VAR(kernel_stack),%rsp
+ SAVE_ARGS 8*6,0,0
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
@@ -8930,7 +8960,7 @@ index a6253ec..4ad2120 100644
movl %eax,%eax /* zero extension */
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
+@@ -305,13 +360,19 @@ ENTRY(ia32_cstar_target)
/* no need to do an access_ok check here because r8 has been
32bit zero extended */
/* hardware stack frame is complete now */
@@ -8953,7 +8983,7 @@ index a6253ec..4ad2120 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -321,13 +372,15 @@ cstar_do_call:
+@@ -321,13 +382,15 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
@@ -8972,7 +9002,7 @@ index a6253ec..4ad2120 100644
RESTORE_ARGS 0,-ARG_SKIP,0,0,0
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
-@@ -355,7 +408,7 @@ sysretl_audit:
+@@ -355,7 +418,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
@@ -8981,17 +9011,17 @@ index a6253ec..4ad2120 100644
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
-@@ -364,6 +417,9 @@ cstar_tracesys:
- movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
+@@ -369,6 +432,9 @@ cstar_tracesys:
+ xchgl %ebp,%r9d
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
- LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- xchgl %ebp,%r9d
-@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
+ jmp cstar_do_call
+ END(ia32_cstar_target)
+
+@@ -409,20 +475,26 @@ ENTRY(ia32_syscall)
CFI_REL_OFFSET rip,RIP-RIP
PARAVIRT_ADJUST_EXCEPTION_FRAME
SWAPGS
@@ -9010,6 +9040,11 @@ index a6253ec..4ad2120 100644
- orl $TS_COMPAT,TI_status(%r10)
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
@@ -9021,17 +9056,17 @@ index a6253ec..4ad2120 100644
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -441,6 +498,9 @@ ia32_tracesys:
- movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
- movq %rsp,%rdi /* &pt_regs -> arg1 */
- call syscall_trace_enter
+@@ -445,6 +517,9 @@ ia32_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
- LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- RESTORE_REST
- cmpq $(IA32_NR_syscalls-1),%rax
-@@ -455,6 +515,7 @@ ia32_badsys:
+ jmp ia32_do_call
+ END(ia32_syscall)
+
+@@ -455,6 +530,7 @@ ia32_badsys:
quiet_ni_syscall:
movq $-ENOSYS,%rax
@@ -14925,10 +14960,10 @@ index cd28a35..c72ed9a 100644
#include <asm/processor.h>
#include <asm/fcntl.h>
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index bcda816..5c89791 100644
+index bcda816..cbab6db 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
-@@ -180,13 +180,146 @@
+@@ -180,13 +180,153 @@
/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
@@ -15032,10 +15067,10 @@ index bcda816..5c89791 100644
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
+/*
+ * ebp: thread_info
-+ * ecx, edx: can be clobbered
+ */
+ENTRY(pax_erase_kstack)
+ pushl %edi
++ pushl %ecx
+ pushl %eax
+
+ mov TI_lowest_stack(%ebp), %edi
@@ -15059,6 +15094,12 @@ index bcda816..5c89791 100644
+2: cld
+ mov %esp, %ecx
+ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %ecx
++ jb 3f
++ ud2
++3:
++
+ shr $2, %ecx
+ rep stosl
+
@@ -15067,6 +15108,7 @@ index bcda816..5c89791 100644
+ mov %edi, TI_lowest_stack(%ebp)
+
+ popl %eax
++ popl %ecx
+ popl %edi
+ ret
+ENDPROC(pax_erase_kstack)
@@ -15076,7 +15118,7 @@ index bcda816..5c89791 100644
cld
PUSH_GS
pushl_cfi %fs
-@@ -209,7 +342,7 @@
+@@ -209,7 +349,7 @@
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
@@ -15085,7 +15127,7 @@ index bcda816..5c89791 100644
movl %edx, %ds
movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
-@@ -217,6 +350,15 @@
+@@ -217,6 +357,15 @@
SET_KERNEL_GS %edx
.endm
@@ -15101,7 +15143,7 @@ index bcda816..5c89791 100644
.macro RESTORE_INT_REGS
popl_cfi %ebx
CFI_RESTORE ebx
-@@ -302,7 +444,7 @@ ENTRY(ret_from_fork)
+@@ -302,7 +451,7 @@ ENTRY(ret_from_fork)
popfl_cfi
jmp syscall_exit
CFI_ENDPROC
@@ -15110,7 +15152,7 @@ index bcda816..5c89791 100644
/*
* Interrupt exit functions should be protected against kprobes
-@@ -336,7 +478,15 @@ resume_userspace_sig:
+@@ -336,7 +485,15 @@ resume_userspace_sig:
andl $SEGMENT_RPL_MASK, %eax
#endif
cmpl $USER_RPL, %eax
@@ -15126,7 +15168,7 @@ index bcda816..5c89791 100644
ENTRY(resume_userspace)
LOCKDEP_SYS_EXIT
-@@ -348,8 +498,8 @@ ENTRY(resume_userspace)
+@@ -348,8 +505,8 @@ ENTRY(resume_userspace)
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
@@ -15137,7 +15179,7 @@ index bcda816..5c89791 100644
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
-@@ -364,7 +514,7 @@ need_resched:
+@@ -364,7 +521,7 @@ need_resched:
jz restore_all
call preempt_schedule_irq
jmp need_resched
@@ -15146,7 +15188,7 @@ index bcda816..5c89791 100644
#endif
CFI_ENDPROC
/*
-@@ -398,23 +548,34 @@ sysenter_past_esp:
+@@ -398,23 +555,34 @@ sysenter_past_esp:
/*CFI_REL_OFFSET cs, 0*/
/*
* Push current_thread_info()->sysenter_return to the stack.
@@ -15184,7 +15226,18 @@ index bcda816..5c89791 100644
movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
.align 4
-@@ -437,12 +598,24 @@ sysenter_do_call:
+@@ -423,6 +591,10 @@ sysenter_past_esp:
+
+ GET_THREAD_INFO(%ebp)
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz sysenter_audit
+ sysenter_do_call:
+@@ -437,12 +609,24 @@ sysenter_do_call:
testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit
sysenter_exit:
@@ -15209,7 +15262,7 @@ index bcda816..5c89791 100644
PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT
-@@ -459,6 +632,9 @@ sysenter_audit:
+@@ -459,6 +643,9 @@ sysenter_audit:
movl %eax,%edx /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
call audit_syscall_entry
@@ -15219,7 +15272,7 @@ index bcda816..5c89791 100644
pushl_cfi %ebx
movl PT_EAX(%esp),%eax /* reload syscall number */
jmp sysenter_do_call
-@@ -485,11 +661,17 @@ sysexit_audit:
+@@ -485,11 +672,17 @@ sysexit_audit:
CFI_ENDPROC
.pushsection .fixup,"ax"
@@ -15239,7 +15292,19 @@ index bcda816..5c89791 100644
.popsection
PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
-@@ -522,6 +704,15 @@ syscall_exit:
+@@ -504,6 +697,11 @@ ENTRY(system_call)
+ pushl_cfi %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ # system call tracing in operation / emulation
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz syscall_trace_entry
+@@ -522,6 +720,15 @@ syscall_exit:
testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work
@@ -15255,7 +15320,7 @@ index bcda816..5c89791 100644
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
-@@ -581,14 +772,34 @@ ldt_ss:
+@@ -581,14 +788,34 @@ ldt_ss:
* compensating for the offset by changing to the ESPFIX segment with
* a base address that matches for the difference.
*/
@@ -15293,7 +15358,7 @@ index bcda816..5c89791 100644
pushl_cfi $__ESPFIX_SS
pushl_cfi %eax /* new kernel esp */
/* Disable interrupts, but do not irqtrace this section: we
-@@ -617,34 +828,28 @@ work_resched:
+@@ -617,34 +844,28 @@ work_resched:
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
@@ -15333,7 +15398,7 @@ index bcda816..5c89791 100644
# perform syscall exit tracing
ALIGN
-@@ -652,11 +857,14 @@ syscall_trace_entry:
+@@ -652,11 +873,14 @@ syscall_trace_entry:
movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
call syscall_trace_enter
@@ -15349,7 +15414,7 @@ index bcda816..5c89791 100644
# perform syscall exit tracing
ALIGN
-@@ -669,20 +877,24 @@ syscall_exit_work:
+@@ -669,20 +893,24 @@ syscall_exit_work:
movl %esp, %eax
call syscall_trace_leave
jmp resume_userspace
@@ -15377,7 +15442,7 @@ index bcda816..5c89791 100644
CFI_ENDPROC
/*
* End of kprobes section
-@@ -756,6 +968,36 @@ ptregs_clone:
+@@ -756,6 +984,36 @@ ptregs_clone:
CFI_ENDPROC
ENDPROC(ptregs_clone)
@@ -15414,7 +15479,7 @@ index bcda816..5c89791 100644
.macro FIXUP_ESPFIX_STACK
/*
* Switch back for ESPFIX stack to the normal zerobased stack
-@@ -765,8 +1007,15 @@ ENDPROC(ptregs_clone)
+@@ -765,8 +1023,15 @@ ENDPROC(ptregs_clone)
* normal stack and adjusts ESP with the matching offset.
*/
/* fixup the stack */
@@ -15432,7 +15497,7 @@ index bcda816..5c89791 100644
shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl_cfi $__KERNEL_DS
-@@ -819,7 +1068,7 @@ vector=vector+1
+@@ -819,7 +1084,7 @@ vector=vector+1
.endr
2: jmp common_interrupt
.endr
@@ -15441,7 +15506,7 @@ index bcda816..5c89791 100644
.previous
END(interrupt)
-@@ -867,7 +1116,7 @@ ENTRY(coprocessor_error)
+@@ -867,7 +1132,7 @@ ENTRY(coprocessor_error)
pushl_cfi $do_coprocessor_error
jmp error_code
CFI_ENDPROC
@@ -15450,7 +15515,7 @@ index bcda816..5c89791 100644
ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
-@@ -888,7 +1137,7 @@ ENTRY(simd_coprocessor_error)
+@@ -888,7 +1153,7 @@ ENTRY(simd_coprocessor_error)
#endif
jmp error_code
CFI_ENDPROC
@@ -15459,7 +15524,7 @@ index bcda816..5c89791 100644
ENTRY(device_not_available)
RING0_INT_FRAME
-@@ -896,7 +1145,7 @@ ENTRY(device_not_available)
+@@ -896,7 +1161,7 @@ ENTRY(device_not_available)
pushl_cfi $do_device_not_available
jmp error_code
CFI_ENDPROC
@@ -15468,7 +15533,7 @@ index bcda816..5c89791 100644
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
-@@ -905,12 +1154,12 @@ ENTRY(native_iret)
+@@ -905,12 +1170,12 @@ ENTRY(native_iret)
.align 4
.long native_iret, iret_exc
.previous
@@ -15483,7 +15548,7 @@ index bcda816..5c89791 100644
#endif
ENTRY(overflow)
-@@ -919,7 +1168,7 @@ ENTRY(overflow)
+@@ -919,7 +1184,7 @@ ENTRY(overflow)
pushl_cfi $do_overflow
jmp error_code
CFI_ENDPROC
@@ -15492,7 +15557,7 @@ index bcda816..5c89791 100644
ENTRY(bounds)
RING0_INT_FRAME
-@@ -927,7 +1176,7 @@ ENTRY(bounds)
+@@ -927,7 +1192,7 @@ ENTRY(bounds)
pushl_cfi $do_bounds
jmp error_code
CFI_ENDPROC
@@ -15501,7 +15566,7 @@ index bcda816..5c89791 100644
ENTRY(invalid_op)
RING0_INT_FRAME
-@@ -935,7 +1184,7 @@ ENTRY(invalid_op)
+@@ -935,7 +1200,7 @@ ENTRY(invalid_op)
pushl_cfi $do_invalid_op
jmp error_code
CFI_ENDPROC
@@ -15510,7 +15575,7 @@ index bcda816..5c89791 100644
ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
-@@ -943,35 +1192,35 @@ ENTRY(coprocessor_segment_overrun)
+@@ -943,35 +1208,35 @@ ENTRY(coprocessor_segment_overrun)
pushl_cfi $do_coprocessor_segment_overrun
jmp error_code
CFI_ENDPROC
@@ -15551,7 +15616,7 @@ index bcda816..5c89791 100644
ENTRY(divide_error)
RING0_INT_FRAME
-@@ -979,7 +1228,7 @@ ENTRY(divide_error)
+@@ -979,7 +1244,7 @@ ENTRY(divide_error)
pushl_cfi $do_divide_error
jmp error_code
CFI_ENDPROC
@@ -15560,7 +15625,7 @@ index bcda816..5c89791 100644
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
-@@ -988,7 +1237,7 @@ ENTRY(machine_check)
+@@ -988,7 +1253,7 @@ ENTRY(machine_check)
pushl_cfi machine_check_vector
jmp error_code
CFI_ENDPROC
@@ -15569,7 +15634,7 @@ index bcda816..5c89791 100644
#endif
ENTRY(spurious_interrupt_bug)
-@@ -997,7 +1246,7 @@ ENTRY(spurious_interrupt_bug)
+@@ -997,7 +1262,7 @@ ENTRY(spurious_interrupt_bug)
pushl_cfi $do_spurious_interrupt_bug
jmp error_code
CFI_ENDPROC
@@ -15578,7 +15643,7 @@ index bcda816..5c89791 100644
/*
* End of kprobes section
*/
-@@ -1112,7 +1361,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
+@@ -1112,7 +1377,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
ENTRY(mcount)
ret
@@ -15587,7 +15652,7 @@ index bcda816..5c89791 100644
ENTRY(ftrace_caller)
cmpl $0, function_trace_stop
-@@ -1141,7 +1390,7 @@ ftrace_graph_call:
+@@ -1141,7 +1406,7 @@ ftrace_graph_call:
.globl ftrace_stub
ftrace_stub:
ret
@@ -15596,7 +15661,7 @@ index bcda816..5c89791 100644
#else /* ! CONFIG_DYNAMIC_FTRACE */
-@@ -1177,7 +1426,7 @@ trace:
+@@ -1177,7 +1442,7 @@ trace:
popl %ecx
popl %eax
jmp ftrace_stub
@@ -15605,7 +15670,7 @@ index bcda816..5c89791 100644
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
-@@ -1198,7 +1447,7 @@ ENTRY(ftrace_graph_caller)
+@@ -1198,7 +1463,7 @@ ENTRY(ftrace_graph_caller)
popl %ecx
popl %eax
ret
@@ -15614,7 +15679,7 @@ index bcda816..5c89791 100644
.globl return_to_handler
return_to_handler:
-@@ -1212,7 +1461,6 @@ return_to_handler:
+@@ -1212,7 +1477,6 @@ return_to_handler:
jmp *%ecx
#endif
@@ -15622,7 +15687,7 @@ index bcda816..5c89791 100644
#include "syscall_table_32.S"
syscall_table_size=(.-sys_call_table)
-@@ -1258,15 +1506,18 @@ error_code:
+@@ -1258,15 +1522,18 @@ error_code:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
@@ -15643,7 +15708,7 @@ index bcda816..5c89791 100644
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
-@@ -1308,7 +1559,7 @@ debug_stack_correct:
+@@ -1308,7 +1575,7 @@ debug_stack_correct:
call do_debug
jmp ret_from_exception
CFI_ENDPROC
@@ -15652,7 +15717,7 @@ index bcda816..5c89791 100644
/*
* NMI is doubly nasty. It can happen _while_ we're handling
-@@ -1345,6 +1596,9 @@ nmi_stack_correct:
+@@ -1345,6 +1612,9 @@ nmi_stack_correct:
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
@@ -15662,7 +15727,7 @@ index bcda816..5c89791 100644
jmp restore_all_notrace
CFI_ENDPROC
-@@ -1381,12 +1635,15 @@ nmi_espfix_stack:
+@@ -1381,12 +1651,15 @@ nmi_espfix_stack:
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
@@ -15679,7 +15744,7 @@ index bcda816..5c89791 100644
ENTRY(int3)
RING0_INT_FRAME
-@@ -1398,14 +1655,14 @@ ENTRY(int3)
+@@ -1398,14 +1671,14 @@ ENTRY(int3)
call do_int3
jmp ret_from_exception
CFI_ENDPROC
@@ -15696,7 +15761,7 @@ index bcda816..5c89791 100644
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
-@@ -1413,7 +1670,7 @@ ENTRY(async_page_fault)
+@@ -1413,7 +1686,7 @@ ENTRY(async_page_fault)
pushl_cfi $do_async_page_fault
jmp error_code
CFI_ENDPROC
@@ -15706,7 +15771,7 @@ index bcda816..5c89791 100644
/*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index faf8d5e..4f16a68 100644
+index faf8d5e..ed7340c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -55,6 +55,8 @@
@@ -15782,7 +15847,7 @@ index faf8d5e..4f16a68 100644
jmp *%rdi
#endif
-@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
+@@ -178,6 +186,280 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
@@ -16011,12 +16076,9 @@ index faf8d5e..4f16a68 100644
+.endm
+
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+/*
-+ * r11: thread_info
-+ * rcx, rdx: can be clobbered
-+ */
+ENTRY(pax_erase_kstack)
+ pushq %rdi
++ pushq %rcx
+ pushq %rax
+ pushq %r11
+
@@ -16057,6 +16119,7 @@ index faf8d5e..4f16a68 100644
+
+ popq %r11
+ popq %rax
++ popq %rcx
+ popq %rdi
+ pax_force_retaddr
+ ret
@@ -16065,7 +16128,7 @@ index faf8d5e..4f16a68 100644
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
-@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
+@@ -231,8 +513,8 @@ ENDPROC(native_usergs_sysret64)
.endm
.macro UNFAKE_STACK_FRAME
@@ -16076,7 +16139,7 @@ index faf8d5e..4f16a68 100644
.endm
/*
-@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -319,7 +601,7 @@ ENDPROC(native_usergs_sysret64)
movq %rsp, %rsi
leaq -RBP(%rsp),%rdi /* arg1 for handler */
@@ -16085,7 +16148,7 @@ index faf8d5e..4f16a68 100644
je 1f
SWAPGS
/*
-@@ -355,9 +639,10 @@ ENTRY(save_rest)
+@@ -355,9 +637,10 @@ ENTRY(save_rest)
movq_cfi r15, R15+16
movq %r11, 8(%rsp) /* return address */
FIXUP_TOP_OF_STACK %r11, 16
@@ -16097,7 +16160,7 @@ index faf8d5e..4f16a68 100644
/* save complete stack frame */
.pushsection .kprobes.text, "ax"
-@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
+@@ -386,9 +669,10 @@ ENTRY(save_paranoid)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
@@ -16110,7 +16173,7 @@ index faf8d5e..4f16a68 100644
.popsection
/*
-@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
+@@ -410,7 +694,7 @@ ENTRY(ret_from_fork)
RESTORE_REST
@@ -16119,7 +16182,7 @@ index faf8d5e..4f16a68 100644
je int_ret_from_sys_call
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
-@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
+@@ -420,7 +704,7 @@ ENTRY(ret_from_fork)
jmp ret_from_sys_call # go to the SYSRET fastpath
CFI_ENDPROC
@@ -16128,7 +16191,7 @@ index faf8d5e..4f16a68 100644
/*
* System call entry. Up to 6 arguments in registers are supported.
-@@ -456,7 +742,7 @@ END(ret_from_fork)
+@@ -456,7 +740,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
@@ -16137,12 +16200,17 @@ index faf8d5e..4f16a68 100644
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
-@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
+@@ -469,12 +753,18 @@ ENTRY(system_call_after_swapgs)
movq %rsp,PER_CPU_VAR(old_rsp)
movq PER_CPU_VAR(kernel_stack),%rsp
+ SAVE_ARGS 8*6,0
+ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
/*
* No need to follow this irqs off/on section - it's straight
* and short:
@@ -16152,7 +16220,7 @@ index faf8d5e..4f16a68 100644
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
-@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
+@@ -484,7 +774,7 @@ ENTRY(system_call_after_swapgs)
system_call_fastpath:
cmpq $__NR_syscall_max,%rax
ja badsys
@@ -16161,7 +16229,7 @@ index faf8d5e..4f16a68 100644
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
/*
-@@ -503,6 +790,8 @@ sysret_check:
+@@ -503,6 +793,8 @@ sysret_check:
andl %edi,%edx
jnz sysret_careful
CFI_REMEMBER_STATE
@@ -16170,7 +16238,7 @@ index faf8d5e..4f16a68 100644
/*
* sysretq will re-enable interrupts:
*/
-@@ -554,14 +843,18 @@ badsys:
+@@ -554,14 +846,18 @@ badsys:
* jump back to the normal fast path.
*/
auditsys:
@@ -16190,7 +16258,7 @@ index faf8d5e..4f16a68 100644
jmp system_call_fastpath
/*
-@@ -591,16 +884,20 @@ tracesys:
+@@ -591,16 +887,20 @@ tracesys:
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
@@ -16212,7 +16280,7 @@ index faf8d5e..4f16a68 100644
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because user could have changed frame */
-@@ -612,7 +909,7 @@ tracesys:
+@@ -612,7 +912,7 @@ tracesys:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -16221,15 +16289,18 @@ index faf8d5e..4f16a68 100644
je retint_restore_args
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
-@@ -623,6 +920,7 @@ GLOBAL(int_with_check)
+@@ -623,7 +923,9 @@ GLOBAL(int_with_check)
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
+- jmp retint_swapgs
++ pax_exit_kernel_user
+ pax_erase_kstack
- jmp retint_swapgs
++ jmp retint_swapgs_pax
/* Either reschedule or signal or syscall exit tracking needed. */
-@@ -669,7 +967,7 @@ int_restore_rest:
+ /* First do a reschedule test. */
+@@ -669,7 +971,7 @@ int_restore_rest:
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
@@ -16238,7 +16309,7 @@ index faf8d5e..4f16a68 100644
/*
* Certain special system calls that need to save a complete full stack frame.
-@@ -685,7 +983,7 @@ ENTRY(\label)
+@@ -685,7 +987,7 @@ ENTRY(\label)
call \func
jmp ptregscall_common
CFI_ENDPROC
@@ -16247,7 +16318,7 @@ index faf8d5e..4f16a68 100644
.endm
PTREGSCALL stub_clone, sys_clone, %r8
-@@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
+@@ -703,9 +1005,10 @@ ENTRY(ptregscall_common)
movq_cfi_restore R12+8, r12
movq_cfi_restore RBP+8, rbp
movq_cfi_restore RBX+8, rbx
@@ -16259,7 +16330,7 @@ index faf8d5e..4f16a68 100644
ENTRY(stub_execve)
CFI_STARTPROC
-@@ -720,7 +1019,7 @@ ENTRY(stub_execve)
+@@ -720,7 +1023,7 @@ ENTRY(stub_execve)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -16268,7 +16339,7 @@ index faf8d5e..4f16a68 100644
/*
* sigreturn is special because it needs to restore all registers on return.
-@@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
+@@ -738,7 +1041,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
@@ -16277,7 +16348,7 @@ index faf8d5e..4f16a68 100644
/*
* Build the entry stubs and pointer table with some assembler magic.
-@@ -773,7 +1072,7 @@ vector=vector+1
+@@ -773,7 +1076,7 @@ vector=vector+1
2: jmp common_interrupt
.endr
CFI_ENDPROC
@@ -16286,7 +16357,7 @@ index faf8d5e..4f16a68 100644
.previous
END(interrupt)
-@@ -793,6 +1092,16 @@ END(interrupt)
+@@ -793,6 +1096,16 @@ END(interrupt)
subq $ORIG_RAX-RBP, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
SAVE_ARGS_IRQ
@@ -16303,7 +16374,7 @@ index faf8d5e..4f16a68 100644
call \func
.endm
-@@ -824,7 +1133,7 @@ ret_from_intr:
+@@ -824,7 +1137,7 @@ ret_from_intr:
exit_intr:
GET_THREAD_INFO(%rcx)
@@ -16312,11 +16383,12 @@ index faf8d5e..4f16a68 100644
je retint_kernel
/* Interrupt came from user space */
-@@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
+@@ -846,12 +1159,16 @@ retint_swapgs: /* return to user-space */
* The iretq could re-enable interrupts:
*/
DISABLE_INTERRUPTS(CLBR_ANY)
+ pax_exit_kernel_user
++retint_swapgs_pax:
TRACE_IRQS_IRETQ
SWAPGS
jmp restore_args
@@ -16328,7 +16400,7 @@ index faf8d5e..4f16a68 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
+@@ -940,7 +1257,7 @@ ENTRY(retint_kernel)
#endif
CFI_ENDPROC
@@ -16337,7 +16409,7 @@ index faf8d5e..4f16a68 100644
/*
* End of kprobes section
*/
-@@ -956,7 +1268,7 @@ ENTRY(\sym)
+@@ -956,7 +1273,7 @@ ENTRY(\sym)
interrupt \do_sym
jmp ret_from_intr
CFI_ENDPROC
@@ -16346,7 +16418,7 @@ index faf8d5e..4f16a68 100644
.endm
#ifdef CONFIG_SMP
-@@ -1021,12 +1333,22 @@ ENTRY(\sym)
+@@ -1021,12 +1338,22 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -16370,7 +16442,7 @@ index faf8d5e..4f16a68 100644
.endm
.macro paranoidzeroentry sym do_sym
-@@ -1038,15 +1360,25 @@ ENTRY(\sym)
+@@ -1038,15 +1365,25 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -16398,7 +16470,7 @@ index faf8d5e..4f16a68 100644
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
INTR_FRAME
-@@ -1056,14 +1388,30 @@ ENTRY(\sym)
+@@ -1056,14 +1393,30 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
TRACE_IRQS_OFF
@@ -16430,7 +16502,7 @@ index faf8d5e..4f16a68 100644
.endm
.macro errorentry sym do_sym
-@@ -1074,13 +1422,23 @@ ENTRY(\sym)
+@@ -1074,13 +1427,23 @@ ENTRY(\sym)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call error_entry
DEFAULT_FRAME 0
@@ -16455,7 +16527,7 @@ index faf8d5e..4f16a68 100644
.endm
/* error code is on the stack already */
-@@ -1093,13 +1451,23 @@ ENTRY(\sym)
+@@ -1093,13 +1456,23 @@ ENTRY(\sym)
call save_paranoid
DEFAULT_FRAME 0
TRACE_IRQS_OFF
@@ -16480,7 +16552,7 @@ index faf8d5e..4f16a68 100644
.endm
zeroentry divide_error do_divide_error
-@@ -1129,9 +1497,10 @@ gs_change:
+@@ -1129,9 +1502,10 @@ gs_change:
2: mfence /* workaround */
SWAPGS
popfq_cfi
@@ -16492,7 +16564,7 @@ index faf8d5e..4f16a68 100644
.section __ex_table,"a"
.align 8
-@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
+@@ -1153,13 +1527,14 @@ ENTRY(kernel_thread_helper)
* Here we are in the child and the registers are set as they were
* at kernel_thread() invocation in the parent.
*/
@@ -16508,7 +16580,7 @@ index faf8d5e..4f16a68 100644
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
+@@ -1186,11 +1561,11 @@ ENTRY(kernel_execve)
RESTORE_REST
testq %rax,%rax
je int_ret_from_sys_call
@@ -16522,7 +16594,7 @@ index faf8d5e..4f16a68 100644
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
-@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
+@@ -1208,9 +1583,10 @@ ENTRY(call_softirq)
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl PER_CPU_VAR(irq_count)
@@ -16534,7 +16606,7 @@ index faf8d5e..4f16a68 100644
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
-@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+@@ -1248,7 +1624,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
@@ -16543,7 +16615,7 @@ index faf8d5e..4f16a68 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
+@@ -1307,7 +1683,7 @@ ENTRY(xen_failsafe_callback)
SAVE_ALL
jmp error_exit
CFI_ENDPROC
@@ -16552,7 +16624,7 @@ index faf8d5e..4f16a68 100644
apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
+@@ -1356,16 +1732,31 @@ ENTRY(paranoid_exit)
TRACE_IRQS_OFF
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore
@@ -16585,7 +16657,7 @@ index faf8d5e..4f16a68 100644
jmp irq_return
paranoid_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1394,7 +1780,7 @@ paranoid_schedule:
+@@ -1394,7 +1785,7 @@ paranoid_schedule:
TRACE_IRQS_OFF
jmp paranoid_userspace
CFI_ENDPROC
@@ -16594,7 +16666,7 @@ index faf8d5e..4f16a68 100644
/*
* Exception entry point. This expects an error code/orig_rax on the stack.
-@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
+@@ -1421,12 +1812,13 @@ ENTRY(error_entry)
movq_cfi r14, R14+8
movq_cfi r15, R15+8
xorl %ebx,%ebx
@@ -16609,7 +16681,7 @@ index faf8d5e..4f16a68 100644
ret
/*
-@@ -1453,7 +1840,7 @@ bstep_iret:
+@@ -1453,7 +1845,7 @@ bstep_iret:
movq %rcx,RIP+8(%rsp)
jmp error_swapgs
CFI_ENDPROC
@@ -16618,7 +16690,7 @@ index faf8d5e..4f16a68 100644
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
-@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
+@@ -1473,7 +1865,7 @@ ENTRY(error_exit)
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
@@ -16627,7 +16699,7 @@ index faf8d5e..4f16a68 100644
/* runs on exception stack */
-@@ -1485,6 +1872,16 @@ ENTRY(nmi)
+@@ -1485,6 +1877,16 @@ ENTRY(nmi)
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
call save_paranoid
DEFAULT_FRAME 0
@@ -16644,7 +16716,7 @@ index faf8d5e..4f16a68 100644
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
-@@ -1495,12 +1892,28 @@ ENTRY(nmi)
+@@ -1495,12 +1897,28 @@ ENTRY(nmi)
DISABLE_INTERRUPTS(CLBR_NONE)
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
@@ -16674,7 +16746,7 @@ index faf8d5e..4f16a68 100644
jmp irq_return
nmi_userspace:
GET_THREAD_INFO(%rcx)
-@@ -1529,14 +1942,14 @@ nmi_schedule:
+@@ -1529,14 +1947,14 @@ nmi_schedule:
jmp paranoid_exit
CFI_ENDPROC
#endif
@@ -44582,6 +44654,20 @@ index 608c1c3..7d040a8 100644
set_fs(fs_save);
return rc;
}
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index a6f3763..f38ed00 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1540,8 +1540,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
+ error = PTR_ERR(file);
+ goto out_free_fd;
+ }
+- fd_install(fd, file);
+ ep->file = file;
++ fd_install(fd, file);
+ return fd;
+
+ out_free_fd:
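
The fs/eventpoll.c hunk above swaps two lines so that ep->file is assigned before fd_install() publishes the new descriptor: once fd_install() runs, another thread can already reach the struct file through the fd and would otherwise observe ep->file still NULL. A minimal sketch of that publish-last ordering, assuming a simplified epoll-like object (illustrative kernel-style code, not the actual eventpoll implementation):

#include <linux/file.h>		/* fd_install() */
#include <linux/fs.h>

struct ep_obj {
	struct file *file;	/* back-pointer other code paths rely on */
};

/* Returns the fd on success; 'ep', 'file' and 'fd' are assumed to have been
 * set up by the caller (anon inode file + unused fd), as in epoll_create1(). */
static int ep_publish(struct ep_obj *ep, struct file *file, int fd)
{
	ep->file = file;	/* finish initialising the private object ... */
	fd_install(fd, file);	/* ... then make the fd visible to userspace  */
	return fd;
}
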
diff --git a/fs/exec.c b/fs/exec.c
index 160cd2f..7f5ba47 100644
--- a/fs/exec.c
@@ -50737,6 +50823,19 @@ index 23ce927..e274cc1 100644
if (!IS_ERR(s))
kfree(s);
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 87323f1..dab9d00 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -858,7 +858,7 @@ xfs_rtbuf_get(
+ xfs_buf_t *bp; /* block buffer, result */
+ xfs_inode_t *ip; /* bitmap or summary inode */
+ xfs_bmbt_irec_t map;
+- int nmap;
++ int nmap = 1;
+ int error; /* error value */
+
+ ip = issum ? mp->m_rsumip : mp->m_rbmip;
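
The xfs_rtalloc.c hunk above initialises nmap to 1 because the mapping routine it is later passed to treats nmap as an in/out argument: on entry it must hold the capacity of the map array, on return it holds how many extents were filled in. Left uninitialised, the capacity handed in is garbage. A self-contained toy showing the in/out convention the fix restores (bmapi_read() here is a stand-in, not the XFS function):

#include <stdio.h>

/* Toy stand-in for an xfs_bmapi_read()-style call: *nmap is in/out.
 * On entry it is the capacity of 'map'; on return, the entries used. */
static int bmapi_read(int start_block, int len, int *map, int *nmap)
{
	int capacity = *nmap;
	int n = len < capacity ? len : capacity;

	for (int i = 0; i < n; i++)
		map[i] = start_block + i;
	*nmap = n;
	return 0;
}

int main(void)
{
	int map[1];
	int nmap = 1;	/* the fix: tell the callee how much room it has */

	bmapi_read(100, 1, map, &nmap);
	printf("mapped %d extent(s), first block %d\n", nmap, map[0]);
	return 0;
}
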
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
index 0000000..cb7b8ea
@@ -63132,7 +63231,7 @@ index a6deef4..c56a7f2 100644
and pointers */
#endif
diff --git a/include/linux/init.h b/include/linux/init.h
-index 9146f39..e19693b 100644
+index 9146f39..5c80baf 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -38,9 +38,15 @@
@@ -63179,22 +63278,6 @@ index 9146f39..e19693b 100644
#define __meminitdata __section(.meminit.data)
#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
-@@ -293,13 +299,13 @@ void __init parse_early_options(char *cmdline);
-
- /* Each module must use one module_init(). */
- #define module_init(initfn) \
-- static inline initcall_t __inittest(void) \
-+ static inline __used initcall_t __inittest(void) \
- { return initfn; } \
- int init_module(void) __attribute__((alias(#initfn)));
-
- /* This is only required if you want to be unloadable. */
- #define module_exit(exitfn) \
-- static inline exitcall_t __exittest(void) \
-+ static inline __used exitcall_t __exittest(void) \
- { return exitfn; } \
- void cleanup_module(void) __attribute__((alias(#exitfn)));
-
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index cdde2b3..d782954 100644
--- a/include/linux/init_task.h
@@ -71586,18 +71669,10 @@ index fea790a..ebb0e82 100644
"stack [addr=%p]\n", addr);
}
diff --git a/lib/extable.c b/lib/extable.c
-index 4cac81e..63e9b8f 100644
+index 4cac81e..ba85842 100644
--- a/lib/extable.c
+++ b/lib/extable.c
-@@ -13,6 +13,7 @@
- #include <linux/init.h>
- #include <linux/sort.h>
- #include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-
- #ifndef ARCH_HAS_SORT_EXTABLE
- /*
-@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
+@@ -36,8 +36,10 @@ static int cmp_ex(const void *a, const void *b)
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
@@ -80936,10 +81011,10 @@ index 38f6617..e70b72b 100755
exuberant()
diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..7963a07 100644
+index 51bd5a0..047aa78 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,876 @@
+@@ -4,6 +4,888 @@
menu "Security options"
@@ -81525,6 +81600,10 @@ index 51bd5a0..7963a07 100644
+ Select the method used to instrument function pointer dereferences.
+ Note that binary modules cannot be instrumented by this approach.
+
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
+ config PAX_KERNEXEC_PLUGIN_METHOD_BTS
+ bool "bts"
+ help
@@ -81698,11 +81777,12 @@ index 51bd5a0..7963a07 100644
+ and you are advised to test this feature on your expected workload
+ before deploying it.
+
-+ Note: full support for this feature requires gcc with plugin support
-+ so make sure your compiler is at least gcc 4.5.0. Using older gcc
-+ versions means that functions with large enough stack frames may
-+ leave uninitialized memory behind that may be exposed to a later
-+ syscall leaking the stack.
++ Note that the full feature requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package. Using
++ older gcc versions means that functions with large enough stack
++ frames may leave uninitialized memory behind that may be exposed
++ to a later syscall leaking the stack.
+
+config PAX_MEMORY_UDEREF
+ bool "Prevent invalid userland pointer dereference"
@@ -81784,11 +81864,14 @@ index 51bd5a0..7963a07 100644
+ arguments marked by a size_overflow attribute with double integer
+ precision (DImode/TImode for 32/64 bit integer types).
+
-+ The recomputed argument is checked against INT_MAX and an event
++ The recomputed argument is checked against TYPE_MAX and an event
+ is logged on overflow and the triggering process is killed.
+
-+ Homepage:
-+ http://www.grsecurity.net/~ephox/overflow_plugin/
++ Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
++
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
+
+config PAX_LATENT_ENTROPY
+ bool "Generate some entropy during boot"
@@ -81800,6 +81883,10 @@ index 51bd5a0..7963a07 100644
+ there is little 'natural' source of entropy normally. The cost
+ is some slowdown of the boot process.
+
++ Note that the implementation requires a gcc with plugin support,
++ i.e., gcc 4.5 or newer. You may need to install the supporting
++ headers explicitly in addition to the normal gcc package.
++
+ Note that entropy extracted this way is not cryptographically
+ secure!
+
@@ -81816,7 +81903,7 @@ index 51bd5a0..7963a07 100644
config KEYS
bool "Enable access key retention support"
help
-@@ -169,7 +1039,7 @@ config INTEL_TXT
+@@ -169,7 +1051,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
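
The reworded PAX_SIZE_OVERFLOW help text above describes the plugin's approach: recompute marked size expressions in double-width integer precision and compare the result against the maximum of the original type. The same idea written by hand, as a small standalone sketch (checked_alloc() and the 32-bit size type are illustrative, not the plugin's generated code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Recompute count * size in 64 bits and reject it if the product no longer
 * fits the original 32-bit type; the plugin would log and kill instead. */
static void *checked_alloc(uint32_t count, uint32_t size)
{
	uint64_t bytes = (uint64_t)count * (uint64_t)size;

	if (bytes > UINT32_MAX)
		return NULL;
	return malloc((size_t)bytes);
}

int main(void)
{
	printf("overflowing request rejected: %s\n",
	       checked_alloc(0x10000, 0x10000) ? "no" : "yes");
	return 0;
}
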
diff --git a/3.5.2/0000_README b/3.5.2/0000_README
index 1900e0a..24c63b2 100644
--- a/3.5.2/0000_README
+++ b/3.5.2/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.9.1-3.5.2-201208241943.patch
+Patch: 4420_grsecurity-2.9.1-3.5.3-201208271906.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.5.2/4420_grsecurity-2.9.1-3.5.2-201208241943.patch b/3.5.2/4420_grsecurity-2.9.1-3.5.3-201208271906.patch
index 8f28b61..9557d64 100644
--- a/3.5.2/4420_grsecurity-2.9.1-3.5.2-201208241943.patch
+++ b/3.5.2/4420_grsecurity-2.9.1-3.5.3-201208271906.patch
@@ -275,7 +275,7 @@ index 13d6166..8c235b6 100644
==============================================================
diff --git a/Makefile b/Makefile
-index 5caa2fa..5fc9329 100644
+index c901aae..0f96503 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -4374,6 +4374,26 @@ index d183f87..1867f1a 100644
regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
+index f2496f2..4e3cc47 100644
+--- a/arch/powerpc/kernel/syscalls.c
++++ b/arch/powerpc/kernel/syscalls.c
+@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
+ long ret;
+
+ if (personality(current->personality) == PER_LINUX32
+- && personality == PER_LINUX)
+- personality = PER_LINUX32;
++ && personality(personality) == PER_LINUX)
++ personality = (personality & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret = (ret & ~PER_MASK) | PER_LINUX;
+ return ret;
+ }
+ #endif
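
The ppc64_personality() hunk above is needed because a personality value packs the personality type in its low PER_MASK bits together with behaviour flags such as ADDR_NO_RANDOMIZE in the upper bits; comparing and assigning whole words, as the old code did, silently drops those flags. A standalone sketch of the mask-and-replace idiom, with constants copied from include/linux/personality.h (values reproduced here from memory of that header):

#include <stdio.h>

#define PER_MASK		0x00ffUL	/* low bits: the personality type    */
#define PER_LINUX		0x0000UL
#define PER_LINUX32		0x0008UL
#define ADDR_NO_RANDOMIZE	0x0040000UL	/* one of the flags in the high bits */
#define personality(pers)	((pers) & PER_MASK)

int main(void)
{
	unsigned long p = PER_LINUX | ADDR_NO_RANDOMIZE;

	/* What the fix does: replace only the type bits, keep the flags. */
	if (personality(p) == PER_LINUX)
		p = (p & ~PER_MASK) | PER_LINUX32;

	printf("type %#lx, ADDR_NO_RANDOMIZE preserved: %s\n",
	       personality(p), (p & ADDR_NO_RANDOMIZE) ? "yes" : "no");
	return 0;
}
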
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1589723..cefe690 100644
--- a/arch/powerpc/kernel/traps.c
@@ -20274,7 +20294,7 @@ index 7df1c6d..9ea7c79 100644
out:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index f95d242..3b49a90 100644
+index 4837375..2cc9722 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -256,6 +256,7 @@ struct gprefix {
@@ -20356,10 +20376,10 @@ index f75af40..285b18f 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 32eb588..19c4fe3 100644
+index 86c8704..e8ee2ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
-@@ -1313,7 +1313,11 @@ static void reload_tss(void)
+@@ -1317,7 +1317,11 @@ static void reload_tss(void)
struct desc_struct *descs;
descs = (void *)gdt->address;
@@ -20371,18 +20391,7 @@ index 32eb588..19c4fe3 100644
load_TR_desc();
}
-@@ -1475,8 +1479,8 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
- * The sysexit path does not restore ds/es, so we must set them to
- * a reasonable value ourselves.
- */
-- loadsegment(ds, __USER_DS);
-- loadsegment(es, __USER_DS);
-+ loadsegment(ds, __KERNEL_DS);
-+ loadsegment(es, __KERNEL_DS);
- #endif
- reload_tss();
- #ifdef CONFIG_X86_64
-@@ -2653,8 +2657,11 @@ static __init int hardware_setup(void)
+@@ -2650,8 +2654,11 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
@@ -20396,7 +20405,7 @@ index 32eb588..19c4fe3 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -3680,7 +3687,7 @@ static void vmx_set_constant_host_state(void)
+@@ -3719,7 +3726,7 @@ static void vmx_set_constant_host_state(void)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
@@ -20405,7 +20414,7 @@ index 32eb588..19c4fe3 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6218,6 +6225,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6257,6 +6264,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp .Lkvm_vmx_return \n\t"
".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
".Lkvm_vmx_return: "
@@ -20418,7 +20427,7 @@ index 32eb588..19c4fe3 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%"R"sp) \n\t"
"pop %0 \n\t"
-@@ -6266,6 +6279,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6305,6 +6318,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -20430,28 +20439,41 @@ index 32eb588..19c4fe3 100644
: "cc", "memory"
, R"ax", R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
-@@ -6294,6 +6312,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
- }
- }
+@@ -6312,7 +6330,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ );
-+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
+-#ifndef CONFIG_X86_64
++#ifdef CONFIG_X86_32
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+@@ -6321,8 +6339,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ * may be executed in interrupt context, which saves and restore segments
+ * around it, nullifying its effect.
+ */
+- loadsegment(ds, __USER_DS);
+- loadsegment(es, __USER_DS);
++ loadsegment(ds, __KERNEL_DS);
++ loadsegment(es, __KERNEL_DS);
++ loadsegment(ss, __KERNEL_DS);
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#ifdef CONFIG_PAX_KERNEXEC
+ loadsegment(fs, __KERNEL_PERCPU);
+#endif
+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#ifdef CONFIG_PAX_MEMORY_UDEREF
+ __set_fs(current_thread_info()->addr_limit);
+#endif
+
- vmx->loaded_vmcs->launched = 1;
+ #endif
- vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+ vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index be6d549..b0ba2bf 100644
+index 14c290d..0dae6e5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1361,8 +1361,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -20462,7 +20484,7 @@ index be6d549..b0ba2bf 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2214,6 +2214,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2218,6 +2218,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -20471,7 +20493,7 @@ index be6d549..b0ba2bf 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -2339,7 +2341,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+@@ -2343,7 +2345,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -20480,7 +20502,7 @@ index be6d549..b0ba2bf 100644
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
-@@ -4876,7 +4878,7 @@ static void kvm_set_mmio_spte_mask(void)
+@@ -4880,7 +4882,7 @@ static void kvm_set_mmio_spte_mask(void)
kvm_mmu_set_mmio_spte_mask(mask);
}
@@ -23386,15 +23408,14 @@ index e5b130b..6690d31 100644
+}
+EXPORT_SYMBOL(copy_to_user_overflow);
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
-index 903ec1e..833f340 100644
+index 903ec1e..af8e064 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
-@@ -6,12 +6,25 @@
+@@ -6,12 +6,24 @@
static inline unsigned long
ex_insn_addr(const struct exception_table_entry *x)
{
- return (unsigned long)&x->insn + x->insn;
-+//printk(KERN_ERR "fixup %p insn:%x fixup:%x\n", x, x->insn, x->fixup);
+ unsigned long reloc = 0;
+
+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
@@ -23417,7 +23438,7 @@ index 903ec1e..833f340 100644
}
int fixup_exception(struct pt_regs *regs)
-@@ -20,7 +33,7 @@ int fixup_exception(struct pt_regs *regs)
+@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
unsigned long new_ip;
#ifdef CONFIG_PNPBIOS
@@ -23426,14 +23447,6 @@ index 903ec1e..833f340 100644
extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
extern u32 pnp_bios_is_utter_crap;
pnp_bios_is_utter_crap = 1;
-@@ -34,6 +47,7 @@ int fixup_exception(struct pt_regs *regs)
- #endif
-
- fixup = search_exception_tables(regs->ip);
-+//printk(KERN_ERR "fixup %p %lx\n", fixup, regs->ip);
- if (fixup) {
- new_ip = ex_fixup_addr(fixup);
-
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 76dcd9d..e9dffde 100644
--- a/arch/x86/mm/fault.c
@@ -30571,7 +30584,7 @@ index ed3224c..6618589 100644
iir = I915_READ(IIR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index a8538ac..4868a05 100644
+index 8a11131..46eeeaa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2000,7 +2000,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
@@ -30583,7 +30596,7 @@ index a8538ac..4868a05 100644
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
-@@ -5925,9 +5925,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+@@ -5914,9 +5914,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
obj = work->old_fb_obj;
@@ -30595,7 +30608,7 @@ index a8538ac..4868a05 100644
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
-@@ -6264,7 +6263,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+@@ -6253,7 +6252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
@@ -30604,7 +30617,7 @@ index a8538ac..4868a05 100644
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
-@@ -6279,7 +6278,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+@@ -6268,7 +6267,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_pending:
@@ -30769,7 +30782,7 @@ index a9514ea..369d511 100644
.train_set = nv50_sor_dp_train_set,
.train_adj = nv50_sor_dp_train_adj
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
-index c486d3c..3a7d6f4 100644
+index c50b075..6b07dfc 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
@@ -44239,6 +44252,20 @@ index b2a34a1..162fa69 100644
set_fs(fs_save);
return rc;
}
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 1c8b556..eedec84 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
+ error = PTR_ERR(file);
+ goto out_free_fd;
+ }
+- fd_install(fd, file);
+ ep->file = file;
++ fd_install(fd, file);
+ return fd;
+
+ out_free_fd:
diff --git a/fs/exec.c b/fs/exec.c
index e95aeed..a943469 100644
--- a/fs/exec.c
@@ -45127,10 +45154,10 @@ index 25cd608..9ed5294 100644
}
return 1;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
-index d23b31c..0585239 100644
+index 1b50890..e56c5ad 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
-@@ -488,8 +488,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+@@ -500,8 +500,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
/* Hm, nope. Are (enough) root reserved clusters available? */
if (uid_eq(sbi->s_resuid, current_fsuid()) ||
(!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
@@ -45175,18 +45202,6 @@ index 01434f2..bd995b4 100644
atomic_t s_lock_busy;
/* locality groups */
-diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
-index 58a75fe..9752106 100644
---- a/fs/ext4/extents.c
-+++ b/fs/ext4/extents.c
-@@ -2663,6 +2663,7 @@ cont:
- }
- path[0].p_depth = depth;
- path[0].p_hdr = ext_inode_hdr(inode);
-+ i = 0;
-
- if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
- err = -EIO;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 1cd6994..5799d45 100644
--- a/fs/ext4/mballoc.c
@@ -50121,6 +50136,27 @@ index 19bf0c5..9f26b02 100644
off & 0x7fffffff, ino, DT_UNKNOWN)) {
*offset = off & 0x7fffffff;
return 0;
+diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
+index f9c3fe3..69cf4fc 100644
+--- a/fs/xfs/xfs_discard.c
++++ b/fs/xfs/xfs_discard.c
+@@ -179,12 +179,14 @@ xfs_ioc_trim(
+ * used by the fstrim application. In the end it really doesn't
+ * matter as trimming blocks is an advisory interface.
+ */
++ if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
++ range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
++ return -XFS_ERROR(EINVAL);
++
+ start = BTOBB(range.start);
+ end = start + BTOBBT(range.len) - 1;
+ minlen = BTOBB(max_t(u64, granularity, range.minlen));
+
+- if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
+- return -XFS_ERROR(EINVAL);
+ if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
+ end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
+
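
The xfs_ioc_trim() hunk above moves the sanity checks so that the user-supplied byte range is validated before it is converted to basic blocks: a start beyond the filesystem or a minlen larger than an allocation group can hold is rejected up front, and only the clamping of the end block remains afterwards. A rough standalone sketch of that validate-then-convert shape (the unit conversion and limits are simplified placeholders, not the XFS macros):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SHIFT 9	/* pretend "basic blocks" are 512 bytes */

struct trim_range { uint64_t start, len, minlen; };	/* byte units */

static int validate_trim(const struct trim_range *r,
			 uint64_t fs_bytes, uint64_t max_minlen_bytes)
{
	/* Validate in the caller's byte units first ... */
	if (r->start >= fs_bytes || r->minlen > max_minlen_bytes)
		return -EINVAL;

	/* ... then convert and clamp in block units. */
	uint64_t start_blk = r->start >> BLOCK_SHIFT;
	uint64_t end_blk = start_blk + (r->len >> BLOCK_SHIFT);
	uint64_t fs_blks = fs_bytes >> BLOCK_SHIFT;

	if (end_blk > fs_blks - 1)
		end_blk = fs_blks - 1;
	printf("trimming blocks %llu..%llu\n",
	       (unsigned long long)start_blk, (unsigned long long)end_blk);
	return 0;
}

int main(void)
{
	struct trim_range r = { .start = 0, .len = 1 << 20, .minlen = 4096 };

	return validate_trim(&r, 1ULL << 30, 1ULL << 26) ? 1 : 0;
}
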
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 3a05a41..320bec6 100644
--- a/fs/xfs/xfs_ioctl.c
@@ -50147,6 +50183,19 @@ index 1a25fd8..e935581 100644
if (!IS_ERR(s))
kfree(s);
+diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
+index 92d4331..ca28a4b 100644
+--- a/fs/xfs/xfs_rtalloc.c
++++ b/fs/xfs/xfs_rtalloc.c
+@@ -857,7 +857,7 @@ xfs_rtbuf_get(
+ xfs_buf_t *bp; /* block buffer, result */
+ xfs_inode_t *ip; /* bitmap or summary inode */
+ xfs_bmbt_irec_t map;
+- int nmap;
++ int nmap = 1;
+ int error; /* error value */
+
+ ip = issum ? mp->m_rsumip : mp->m_rbmip;
diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
new file mode 100644
index 0000000..4d533f1