-rw-r--r--  4.5.7/0000_README                                                                                              2
-rw-r--r--  4.5.7/4420_grsecurity-3.1-4.5.7-201606302132.patch (renamed from 4.5.7/4420_grsecurity-3.1-4.5.7-201606292300.patch)  416
-rw-r--r--  4.5.7/4425_grsec_remove_EI_PAX.patch                                                                           2
-rw-r--r--  4.5.7/4450_grsec-kconfig-default-gids.patch                                                                    8
-rw-r--r--  4.5.7/4470_disable-compat_vdso.patch                                                                           2
-rw-r--r--  4.5.7/4475_emutramp_default_on.patch                                                                           4
6 files changed, 252 insertions(+), 182 deletions(-)
diff --git a/4.5.7/0000_README b/4.5.7/0000_README
index 6531b4d..cd47bdd 100644
--- a/4.5.7/0000_README
+++ b/4.5.7/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.5.7-201606292300.patch
+Patch: 4420_grsecurity-3.1-4.5.7-201606302132.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.5.7/4420_grsecurity-3.1-4.5.7-201606292300.patch b/4.5.7/4420_grsecurity-3.1-4.5.7-201606302132.patch
index 4f4d48f..6f9feec 100644
--- a/4.5.7/4420_grsecurity-3.1-4.5.7-201606292300.patch
+++ b/4.5.7/4420_grsecurity-3.1-4.5.7-201606302132.patch
@@ -12658,7 +12658,7 @@ index 3ba5ff2..44bdacc 100644
config X86_MINIMUM_CPU_FAMILY
int
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 9b18ed9..9528749 100644
+index 9b18ed9..0fb0660 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -55,6 +55,7 @@ config X86_PTDUMP
@@ -12669,16 +12669,15 @@ index 9b18ed9..9528749 100644
select X86_PTDUMP_CORE
---help---
Say Y here if you want to show the kernel pagetable layout in a
-@@ -77,7 +78,7 @@ config EFI_PGT_DUMP
+@@ -77,7 +78,6 @@ config EFI_PGT_DUMP
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
default y
- depends on DEBUG_KERNEL
-+ depends on DEBUG_KERNEL && BROKEN
---help---
Mark the kernel read-only data as write-protected in the pagetables,
in order to catch accidental (and incorrect) writes to such const
-@@ -123,7 +124,7 @@ config DEBUG_WX
+@@ -123,7 +123,7 @@ config DEBUG_WX
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
@@ -12687,7 +12686,7 @@ index 9b18ed9..9528749 100644
---help---
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
-@@ -375,6 +376,7 @@ config X86_DEBUG_FPU
+@@ -375,6 +375,7 @@ config X86_DEBUG_FPU
config PUNIT_ATOM_DEBUG
tristate "ATOM Punit debug driver"
select DEBUG_FS
@@ -27194,7 +27193,7 @@ index 2c0f340..76c1d24 100644
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
-index 6bc9ae2..33997fe 100644
+index 6bc9ae2..51f7c58 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -27,6 +27,12 @@
@@ -27466,28 +27465,23 @@ index 6bc9ae2..33997fe 100644
pushl 16(%esp)
pushl 24(%esp)
pushl 32(%esp)
-@@ -663,29 +755,34 @@ ENTRY(setup_once_ref)
- /*
- * BSS section
- */
+@@ -660,11 +752,8 @@ ENTRY(initial_code)
+ ENTRY(setup_once_ref)
+ .long setup_once
+
+-/*
+- * BSS section
+- */
-__PAGE_ALIGNED_BSS
- .align PAGE_SIZE
++__READ_ONLY
++ .balign PAGE_SIZE
#ifdef CONFIG_X86_PAE
-+.section .initial_pg_pmd,"a",@progbits
initial_pg_pmd:
.fill 1024*KPMDS,4,0
- #else
-+.section .initial_page_table,"a",@progbits
- ENTRY(initial_page_table)
- .fill 1024,4,0
- #endif
-+.section .initial_pg_fixmap,"a",@progbits
- initial_pg_fixmap:
- .fill 1024,4,0
-+.section .empty_zero_page,"a",@progbits
+@@ -677,15 +766,18 @@ initial_pg_fixmap:
ENTRY(empty_zero_page)
.fill 4096,1,0
-+.section .swapper_pg_dir,"a",@progbits
ENTRY(swapper_pg_dir)
- .fill 1024,4,0
+#ifdef CONFIG_X86_PAE
@@ -27503,21 +27497,24 @@ index 6bc9ae2..33997fe 100644
-__PAGE_ALIGNED_DATA
- /* Page-aligned for the benefit of paravirt? */
- .align PAGE_SIZE
-+.section .initial_page_table,"a",@progbits
++__READ_ONLY
++ .balign PAGE_SIZE
ENTRY(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
-@@ -704,12 +801,20 @@ ENTRY(initial_page_table)
+@@ -703,13 +795,21 @@ ENTRY(initial_page_table)
+ # else
# error "Kernel PMDs should be 1, 2 or 3"
# endif
- .align PAGE_SIZE /* needs to be page-sized too */
+- .align PAGE_SIZE /* needs to be page-sized too */
++ .balign PAGE_SIZE /* needs to be page-sized too */
+
-+#ifdef CONFIG_PAX_PER_CPU_PGD
++# ifdef CONFIG_PAX_PER_CPU_PGD
+ENTRY(cpu_pgd)
+ .rept 2*NR_CPUS
+ .fill PTRS_PER_PGD,8,0
+ .endr
-+#endif
++# endif
+
#endif
@@ -27529,16 +27526,16 @@ index 6bc9ae2..33997fe 100644
__INITRODATA
int_msg:
-@@ -737,7 +842,7 @@ fault_msg:
+@@ -737,7 +837,7 @@ fault_msg:
* segment size, and 32-bit linear address value:
*/
- .data
-+.section .rodata,"a",@progbits
++__READ_ONLY
.globl boot_gdt_descr
.globl idt_descr
-@@ -746,7 +851,7 @@ fault_msg:
+@@ -746,7 +846,7 @@ fault_msg:
.word 0 # 32 bit align gdt_desc.address
boot_gdt_descr:
.word __BOOT_DS+7
@@ -27547,7 +27544,7 @@ index 6bc9ae2..33997fe 100644
.word 0 # 32-bit align idt_desc.address
idt_descr:
-@@ -757,7 +862,7 @@ idt_descr:
+@@ -757,7 +857,7 @@ idt_descr:
.word 0 # 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
.word GDT_ENTRIES*8-1
@@ -27556,7 +27553,7 @@ index 6bc9ae2..33997fe 100644
/*
* The boot_gdt must mirror the equivalent in setup.S and is
-@@ -766,5 +871,65 @@ ENTRY(early_gdt_descr)
+@@ -766,5 +866,65 @@ ENTRY(early_gdt_descr)
.align L1_CACHE_BYTES
ENTRY(boot_gdt)
.fill GDT_ENTRY_BOOT_CS,8,0
@@ -27625,7 +27622,7 @@ index 6bc9ae2..33997fe 100644
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index ffdc0e8..60b5d16 100644
+index ffdc0e8..1827c62 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,6 +20,8 @@
@@ -27704,7 +27701,7 @@ index ffdc0e8..60b5d16 100644
movq %rcx, %cr4
/* Setup early boot stage 4 level pagetables. */
-@@ -205,10 +239,21 @@ ENTRY(secondary_startup_64)
+@@ -205,10 +239,24 @@ ENTRY(secondary_startup_64)
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_SCE, %eax /* Enable System Call */
@@ -27716,7 +27713,10 @@ index ffdc0e8..60b5d16 100644
+ je 1f
btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
-+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START)(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 8(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 16(%rip)
++ btsq $_PAGE_BIT_NX, init_level4_pgt + (8*L4_VMALLOC_START) + 24(%rip)
+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
@@ -27727,7 +27727,7 @@ index ffdc0e8..60b5d16 100644
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-@@ -288,6 +333,7 @@ ENTRY(secondary_startup_64)
+@@ -288,6 +336,7 @@ ENTRY(secondary_startup_64)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
@@ -27735,7 +27735,7 @@ index ffdc0e8..60b5d16 100644
movq initial_code(%rip),%rax
pushq $0 # fake return address to stop unwinder
pushq $__KERNEL_CS # set correct cs
-@@ -321,7 +367,7 @@ ENDPROC(start_cpu0)
+@@ -321,7 +370,7 @@ ENDPROC(start_cpu0)
.quad INIT_PER_CPU_VAR(irq_stack_union)
GLOBAL(stack_start)
@@ -27744,7 +27744,7 @@ index ffdc0e8..60b5d16 100644
.word 0
__FINITDATA
-@@ -401,7 +447,7 @@ early_idt_handler_common:
+@@ -401,7 +450,7 @@ early_idt_handler_common:
call dump_stack
#ifdef CONFIG_KALLSYMS
leaq early_idt_ripmsg(%rip),%rdi
@@ -27753,15 +27753,15 @@ index ffdc0e8..60b5d16 100644
call __print_symbol
#endif
#endif /* EARLY_PRINTK */
-@@ -430,6 +476,7 @@ ENDPROC(early_idt_handler_common)
+@@ -430,6 +479,7 @@ ENDPROC(early_idt_handler_common)
early_recursion_flag:
.long 0
-+ .section .rodata,"a",@progbits
++ __READ_ONLY
#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
-@@ -452,40 +499,70 @@ GLOBAL(name)
+@@ -452,40 +502,70 @@ GLOBAL(name)
__INITDATA
NEXT_PAGE(early_level4_pgt)
.fill 511,8,0
@@ -27772,7 +27772,7 @@ index ffdc0e8..60b5d16 100644
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
- .data
-+ .section .rodata,"a",@progbits
++ __READ_ONLY
-#ifndef CONFIG_XEN
NEXT_PAGE(init_level4_pgt)
@@ -27844,7 +27844,7 @@ index ffdc0e8..60b5d16 100644
NEXT_PAGE(level2_kernel_pgt)
/*
-@@ -502,31 +579,79 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -502,31 +582,79 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
@@ -31240,7 +31240,7 @@ index e574b85..5514c57 100644
case VM86_GET_AND_RESET_IRQ: {
return get_and_reset_irq(irqnumber);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
-index 74e4bf1..a9a6168 100644
+index 74e4bf1..0897a97 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -26,6 +26,13 @@
@@ -31310,7 +31310,7 @@ index 74e4bf1..a9a6168 100644
HEAD_TEXT
. = ALIGN(8);
_stext = .;
-@@ -104,13 +124,47 @@ SECTIONS
+@@ -104,13 +124,35 @@ SECTIONS
IRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
@@ -31343,18 +31343,6 @@ index 74e4bf1..a9a6168 100644
+ _etext = . - __KERNEL_TEXT_OFFSET;
+ }
+
-+#ifdef CONFIG_X86_32
-+ . = ALIGN(PAGE_SIZE);
-+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
-+ . = ALIGN(PAGE_SIZE);
-+ *(.empty_zero_page)
-+ *(.initial_pg_fixmap)
-+ *(.initial_pg_pmd)
-+ *(.initial_page_table)
-+ *(.swapper_pg_dir)
-+ } :rodata
-+#endif
-+
+ . = ALIGN(PAGE_SIZE);
+ NOTES :rodata :note
+
@@ -31362,7 +31350,7 @@ index 74e4bf1..a9a6168 100644
#if defined(CONFIG_DEBUG_RODATA)
/* .text should occupy whole number of pages */
-@@ -122,16 +176,20 @@ SECTIONS
+@@ -122,16 +164,20 @@ SECTIONS
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
@@ -31386,7 +31374,7 @@ index 74e4bf1..a9a6168 100644
PAGE_ALIGNED_DATA(PAGE_SIZE)
-@@ -174,12 +232,19 @@ SECTIONS
+@@ -174,12 +220,19 @@ SECTIONS
. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
/* Init code and data - will be freed after init */
@@ -31409,7 +31397,7 @@ index 74e4bf1..a9a6168 100644
/*
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* output PHDR, so the next output section - .init.text - should
-@@ -190,12 +255,33 @@ SECTIONS
+@@ -190,12 +243,33 @@ SECTIONS
"per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif
@@ -31447,7 +31435,7 @@ index 74e4bf1..a9a6168 100644
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
-@@ -266,19 +352,12 @@ SECTIONS
+@@ -266,19 +340,12 @@ SECTIONS
}
. = ALIGN(8);
@@ -31468,7 +31456,7 @@ index 74e4bf1..a9a6168 100644
PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif
-@@ -297,16 +376,10 @@ SECTIONS
+@@ -297,16 +364,10 @@ SECTIONS
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
@@ -31486,7 +31474,7 @@ index 74e4bf1..a9a6168 100644
/* BSS */
. = ALIGN(PAGE_SIZE);
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-@@ -322,6 +395,7 @@ SECTIONS
+@@ -322,6 +383,7 @@ SECTIONS
__brk_base = .;
. += 64 * 1024; /* 64k alignment slop space */
*(.brk_reservation) /* areas brk users have reserved */
@@ -31494,7 +31482,7 @@ index 74e4bf1..a9a6168 100644
__brk_limit = .;
}
-@@ -348,13 +422,12 @@ SECTIONS
+@@ -348,13 +410,12 @@ SECTIONS
* for the boot processor.
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
@@ -35806,7 +35794,7 @@ index 740d7ac..4091827 100644
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 493f541..d8e6b22 100644
+index 493f541..ee7a3f0 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,7 @@
@@ -35817,16 +35805,15 @@ index 493f541..d8e6b22 100644
#include <asm/cacheflush.h>
#include <asm/e820.h>
-@@ -17,6 +18,8 @@
+@@ -17,6 +18,7 @@
#include <asm/proto.h>
#include <asm/dma.h> /* for MAX_DMA_PFN */
#include <asm/microcode.h>
-+#include <asm/desc.h>
+#include <asm/bios_ebda.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
-@@ -618,7 +621,18 @@ void __init init_mem_mapping(void)
+@@ -618,7 +620,18 @@ void __init init_mem_mapping(void)
early_ioremap_page_table_range_init();
#endif
@@ -35845,7 +35832,7 @@ index 493f541..d8e6b22 100644
__flush_tlb_all();
early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
-@@ -634,10 +648,34 @@ void __init init_mem_mapping(void)
+@@ -634,10 +647,34 @@ void __init init_mem_mapping(void)
* Access has to be given to non-kernel-ram areas as well, these contain the PCI
* mmio resources as well as potential bios/acpi data regions.
*/
@@ -35880,8 +35867,8 @@ index 493f541..d8e6b22 100644
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
if (!page_is_ram(pagenr))
-@@ -683,8 +721,127 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
- #endif
+@@ -645,6 +682,29 @@ int devmem_is_allowed(unsigned long pagenr)
+ return 0;
}
+#ifdef CONFIG_GRKERNSEC_KMEM
@@ -35907,109 +35894,29 @@ index 493f541..d8e6b22 100644
+static inline void gr_init_ebda(void) { }
+#endif
+
+ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ {
+ unsigned long begin_aligned, end_aligned;
+@@ -668,7 +728,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ */
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
+- begin, end - 1);
++ begin, end - 1);
+ set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
+ #else
+ /*
+@@ -685,6 +745,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+
void free_initmem(void)
{
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_X86_32
-+ /* PaX: limit KERNEL_CS to actual size */
-+ unsigned long addr, limit;
-+ struct desc_struct d;
-+ int cpu;
-+#else
-+ pgd_t *pgd;
-+ pud_t *pud;
-+ pmd_t *pmd;
-+ unsigned long addr, end;
-+#endif
-+#endif
-+
+ gr_init_ebda();
+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_X86_32
-+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
-+ limit = (limit - 1UL) >> PAGE_SHIFT;
-+
-+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
-+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
-+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
-+ }
-+
-+ /* PaX: make KERNEL_CS read-only */
-+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
-+ if (!paravirt_enabled())
-+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
-+/*
-+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ }
-+*/
-+#ifdef CONFIG_X86_PAE
-+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
-+/*
-+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
-+ }
-+*/
-+#endif
-+
-+#ifdef CONFIG_MODULES
-+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
-+#endif
-+
-+#else
-+ /* PaX: make kernel code/rodata read-only, rest non-executable */
-+ set_memory_ro((unsigned long)_text, ((unsigned long)(_sdata - _text) >> PAGE_SHIFT));
-+ set_memory_nx((unsigned long)_sdata, (__START_KERNEL_map + KERNEL_IMAGE_SIZE - (unsigned long)_sdata) >> PAGE_SHIFT);
-+
-+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ continue;
-+ if (addr >= (unsigned long)_text)
-+ BUG_ON(!pmd_large(*pmd));
-+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
-+ BUG_ON(pmd_write(*pmd));
-+// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ else
-+ BUG_ON(!(pmd_flags(*pmd) & _PAGE_NX));
-+// set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
-+ }
-+
-+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
-+ end = addr + KERNEL_IMAGE_SIZE;
-+ for (; addr < end; addr += PMD_SIZE) {
-+ pgd = pgd_offset_k(addr);
-+ pud = pud_offset(pgd, addr);
-+ pmd = pmd_offset(pud, addr);
-+ if (!pmd_present(*pmd))
-+ continue;
-+ if (addr >= (unsigned long)_text)
-+ BUG_ON(!pmd_large(*pmd));
-+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
-+ BUG_ON(pmd_write(*pmd));
-+// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+ }
-+#endif
-+
-+ flush_tlb_all();
-+#endif
-+
free_init_pages("unused kernel",
(unsigned long)(&__init_begin),
(unsigned long)(&__init_end));
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index cb4ef3d..377ec5a 100644
+index cb4ef3d..1b13259 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
@@ -36253,16 +36160,77 @@ index cb4ef3d..377ec5a 100644
pr_debug("Set kernel text: %lx - %lx for read only\n",
start, start+size);
-@@ -927,6 +931,7 @@ void mark_rodata_ro(void)
+@@ -911,7 +915,7 @@ static void mark_nxdata_nx(void)
+ * When this called, init has already been executed and released,
+ * so everything past _etext should be NX.
+ */
+- unsigned long start = PFN_ALIGN(_etext);
++ unsigned long start = ktla_ktva(PFN_ALIGN(_etext));
+ /*
+ * This comes from is_kernel_text upper limit. Also HPAGE where used:
+ */
+@@ -927,26 +931,47 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;
+- set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+- printk(KERN_INFO "Write protecting the kernel text: %luk\n",
+- size >> 10);
++ if (config_enabled(CONFIG_PAX_KERNEXEC)) {
++ /* PaX: limit KERNEL_CS to actual size */
++ unsigned long limit;
++ struct desc_struct d;
++ int cpu;
+
+- kernel_set_to_readonly = 1;
++ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++
++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
++ }
++
++ if (config_enabled(CONFIG_MODULES))
++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
++ }
++
+ start = ktla_ktva(start);
++ /* PaX: make KERNEL_CS read-only */
++ if (config_enabled(CONFIG_PAX_KERNEXEC) && !paravirt_enabled()) {
++ set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
++ printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10);
++
++ kernel_set_to_readonly = 1;
+
+ #ifdef CONFIG_CPA_DEBUG
+- printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
+- start, start+size);
+- set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
++ printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", start, start+size);
++ set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
+
+- printk(KERN_INFO "Testing CPA: write protecting again\n");
+- set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
++ printk(KERN_INFO "Testing CPA: write protecting again\n");
++ set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
+ #endif
++ }
+
+ start += size;
+- size = (unsigned long)__end_rodata - start;
++ size = PFN_ALIGN(_sdata) - start;
set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
- printk(KERN_INFO "Write protecting the kernel text: %luk\n",
- size >> 10);
+- printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+- size >> 10);
++ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", size >> 10);
+ rodata_test();
+
+ #ifdef CONFIG_CPA_DEBUG
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 5488d21..6063860 100644
+index 5488d21..9f75681 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -137,7 +137,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
@@ -36395,6 +36363,94 @@ index 5488d21..6063860 100644
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
+@@ -1107,8 +1135,7 @@ void set_kernel_text_ro(void)
+ if (!kernel_set_to_readonly)
+ return;
+
+- pr_debug("Set kernel text: %lx - %lx for read only\n",
+- start, end);
++ pr_debug("Set kernel text: %lx - %lx for read only\n", start, end);
+
+ /*
+ * Set the kernel identity mapping for text RO.
+@@ -1118,15 +1145,20 @@ void set_kernel_text_ro(void)
+
+ void mark_rodata_ro(void)
+ {
++ unsigned long addr;
+ unsigned long start = PFN_ALIGN(_text);
+ unsigned long rodata_start = PFN_ALIGN(__start_rodata);
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long end = PFN_ALIGN(_sdata);
++ unsigned long text_end = end;
++#else
+ unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+ unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
++#endif
+ unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
+ unsigned long all_end;
+
+- printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+- (end - start) >> 10);
++ printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10);
+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+
+ kernel_set_to_readonly = 1;
+@@ -1156,12 +1188,54 @@ void mark_rodata_ro(void)
+ set_memory_ro(start, (end-start) >> PAGE_SHIFT);
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ /* PaX: ensure that kernel code/rodata is read-only, the rest is non-executable */
++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if (addr >= (unsigned long)_text)
++ BUG_ON(!pmd_large(*pmd));
++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
++ BUG_ON(pmd_write(*pmd));
++// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ else
++ BUG_ON(!(pmd_flags(*pmd) & _PAGE_NX));
++// set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ }
++
++ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
++ end = addr + KERNEL_IMAGE_SIZE;
++ for (; addr < end; addr += PMD_SIZE) {
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ if (!pmd_present(*pmd))
++ continue;
++ if (addr >= (unsigned long)_text)
++ BUG_ON(!pmd_large(*pmd));
++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
++ BUG_ON(pmd_write(*pmd));
++// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++#else
+ free_init_pages("unused kernel",
+ (unsigned long) __va(__pa_symbol(text_end)),
+ (unsigned long) __va(__pa_symbol(rodata_start)));
+ free_init_pages("unused kernel",
+ (unsigned long) __va(__pa_symbol(rodata_end)),
+ (unsigned long) __va(__pa_symbol(_sdata)));
++#endif
+
+ debug_checkwx();
+ }
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 9c0ff04..9020d5f 100644
--- a/arch/x86/mm/iomap_32.c
@@ -131434,7 +131490,7 @@ index ba7a9b0..33a0237 100644
extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
diff --git a/include/linux/init.h b/include/linux/init.h
-index b449f37..3416791 100644
+index b449f37..2bf1598 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@
@@ -131455,6 +131511,19 @@ index b449f37..3416791 100644
#define __meminitdata __section(.meminit.data)
#define __meminitconst __constsection(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
+@@ -117,6 +117,12 @@
+ #define __REFDATA .section ".ref.data", "aw"
+ #define __REFCONST .section ".ref.rodata", "a"
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define __READ_ONLY .section ".data..read_only","a",%progbits
++#else
++#define __READ_ONLY .section ".data..mostly","aw",%progbits
++#endif
++
+ #ifndef __ASSEMBLY__
+ /*
+ * Used for initialization calls..
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f2cb8d4..2f0363e 100644
--- a/include/linux/init_task.h
@@ -211966,10 +212035,10 @@ index 23ba1c6..cad2484 100755
# Find all available archs
find_all_archs()
diff --git a/security/Kconfig b/security/Kconfig
-index e452378..e634654 100644
+index e452378..cc25231 100644
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -4,6 +4,994 @@
+@@ -4,6 +4,995 @@
menu "Security options"
@@ -212559,6 +212628,7 @@ index e452378..e634654 100644
+ depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
+ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
+ select PAX_KERNEXEC_PLUGIN if X86_64
++ select DEBUG_RODATA if X86
+ select ARM_KERNMEM_PERMS if ARM
+ help
+ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
@@ -212964,7 +213034,7 @@ index e452378..e634654 100644
source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
-@@ -104,7 +1092,7 @@ config INTEL_TXT
+@@ -104,7 +1093,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
diff --git a/4.5.7/4425_grsec_remove_EI_PAX.patch b/4.5.7/4425_grsec_remove_EI_PAX.patch
index 2a1aa6c..c988c9a 100644
--- a/4.5.7/4425_grsec_remove_EI_PAX.patch
+++ b/4.5.7/4425_grsec_remove_EI_PAX.patch
@@ -8,7 +8,7 @@ X-Gentoo-Bug-URL: https://bugs.gentoo.org/445600
diff -Nuar linux-3.7.1-hardened.orig/security/Kconfig linux-3.7.1-hardened/security/Kconfig
--- linux-3.7.1-hardened.orig/security/Kconfig 2012-12-26 08:39:29.000000000 -0500
+++ linux-3.7.1-hardened/security/Kconfig 2012-12-26 09:05:44.000000000 -0500
-@@ -279,7 +279,7 @@
+@@ -280,7 +280,7 @@
config PAX_EI_PAX
bool 'Use legacy ELF header marking'
diff --git a/4.5.7/4450_grsec-kconfig-default-gids.patch b/4.5.7/4450_grsec-kconfig-default-gids.patch
index 79a866b..ccf0abd 100644
--- a/4.5.7/4450_grsec-kconfig-default-gids.patch
+++ b/4.5.7/4450_grsec-kconfig-default-gids.patch
@@ -73,7 +73,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
diff -Nuar a/security/Kconfig b/security/Kconfig
--- a/security/Kconfig 2012-10-13 09:51:35.000000000 -0400
+++ b/security/Kconfig 2012-10-13 09:52:59.000000000 -0400
-@@ -207,7 +207,7 @@
+@@ -208,7 +208,7 @@
config GRKERNSEC_PROC_GID
int "GID exempted from /proc restrictions"
@@ -82,7 +82,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group will be exempted from
grsecurity's /proc restrictions, allowing users of the specified
-@@ -218,7 +218,7 @@
+@@ -219,7 +219,7 @@
config GRKERNSEC_TPE_UNTRUSTED_GID
int "GID for TPE-untrusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -91,7 +91,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines which group untrusted users should
be added to. These users will be placed under grsecurity's Trusted Path
-@@ -230,7 +230,7 @@
+@@ -231,7 +231,7 @@
config GRKERNSEC_TPE_TRUSTED_GID
int "GID for TPE-trusted users"
depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -100,7 +100,7 @@ diff -Nuar a/security/Kconfig b/security/Kconfig
help
Setting this GID determines what group TPE restrictions will be
*disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -239,7 +239,7 @@
+@@ -240,7 +240,7 @@
config GRKERNSEC_SYMLINKOWN_GID
int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
depends on GRKERNSEC_CONFIG_SERVER
diff --git a/4.5.7/4470_disable-compat_vdso.patch b/4.5.7/4470_disable-compat_vdso.patch
index 4aba080..febce96 100644
--- a/4.5.7/4470_disable-compat_vdso.patch
+++ b/4.5.7/4470_disable-compat_vdso.patch
@@ -26,7 +26,7 @@ Closes bug: http://bugs.gentoo.org/show_bug.cgi?id=210138
diff -urp a/arch/x86/Kconfig b/arch/x86/Kconfig
--- a/arch/x86/Kconfig 2009-07-31 01:36:57.323857684 +0100
+++ b/arch/x86/Kconfig 2009-07-31 01:51:39.395749681 +0100
-@@ -2044,29 +2044,8 @@
+@@ -2047,29 +2047,8 @@
config COMPAT_VDSO
def_bool n
diff --git a/4.5.7/4475_emutramp_default_on.patch b/4.5.7/4475_emutramp_default_on.patch
index afd6019..feb8c7b 100644
--- a/4.5.7/4475_emutramp_default_on.patch
+++ b/4.5.7/4475_emutramp_default_on.patch
@@ -10,7 +10,7 @@ See bug:
diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/security/Kconfig
--- linux-3.9.2-hardened.orig/security/Kconfig 2013-05-18 08:53:41.000000000 -0400
+++ linux-3.9.2-hardened/security/Kconfig 2013-05-18 09:17:57.000000000 -0400
-@@ -439,7 +439,7 @@
+@@ -440,7 +440,7 @@
config PAX_EMUTRAMP
bool "Emulate trampolines"
@@ -19,7 +19,7 @@ diff -Naur linux-3.9.2-hardened.orig/security/Kconfig linux-3.9.2-hardened/secur
depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
help
There are some programs and libraries that for one reason or
-@@ -462,6 +462,12 @@
+@@ -463,6 +463,12 @@
utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
for the affected files.