author    Anthony G. Basile <basile@opensource.dyc.edu>  2010-09-19 09:56:35 -0400
committer Anthony G. Basile <basile@opensource.dyc.edu>  2010-09-19 09:56:35 -0400
commit    a31c29d8aa9c1cc7d5065710b8f9716bd117bf11 (patch)
tree      e81d9a8528846cef1358b9af725358598eb88b07
parent    Fixed compat_alloc_user_space undefined (diff)
download  hardened-patchset-20100917.tar.gz
          hardened-patchset-20100917.tar.bz2
          hardened-patchset-20100917.zip
Updated Grsec/PaX 20100917

2.2.0-2.6.32.21-201009171945 for 2.6.32.21
2.2.0-2.6.34.7-201009171945 for 2.6.34.7
2.2.0-2.6.35.4-201009172030 for 2.6.35.4
-rw-r--r--  2.6.32/0000_README                                             2
-rw-r--r--  2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch (renamed from 2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch)  1257
-rw-r--r--  2.6.32/4425_grsec-pax-without-grsec.patch                      8
-rw-r--r--  2.6.32/4430_grsec-kconfig-default-gids.patch                   18
-rw-r--r--  2.6.32/4440_selinux-avc_audit-log-curr_ip.patch                2
-rw-r--r--  2.6.34/0000_README                                             2
-rw-r--r--  2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009171945.patch (renamed from 2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009162222.patch)  1208
-rw-r--r--  2.6.34/4425_grsec-pax-without-grsec.patch                      10
-rw-r--r--  2.6.34/4430_grsec-kconfig-default-gids.patch                   18
-rw-r--r--  2.6.34/4440_selinux-avc_audit-log-curr_ip.patch                2
-rw-r--r--  2.6.35/0000_README                                             50
-rw-r--r--  2.6.35/4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch       56800
-rw-r--r--  2.6.35/4421_grsec-remove-localversion-grsec.patch              9
-rw-r--r--  2.6.35/4422_grsec-mute-warnings.patch                          35
-rw-r--r--  2.6.35/4423_grsec-remove-protected-paths.patch                 20
-rw-r--r--  2.6.35/4425_grsec-pax-without-grsec.patch                      92
-rw-r--r--  2.6.35/4430_grsec-kconfig-default-gids.patch                   76
-rw-r--r--  2.6.35/4435_grsec-kconfig-gentoo.patch                         444
-rw-r--r--  2.6.35/4440_selinux-avc_audit-log-curr_ip.patch                65
-rw-r--r--  2.6.35/4445_disable-compat_vdso.patch                          46
-rw-r--r--  2.6.35/4450_check_ssp_fix.patch                                17
21 files changed, 59537 insertions, 644 deletions
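Note: nearly all of the churn in the updated 2.6.32 and 2.6.34 patches below is one mechanical PaX change: each architecture's arch_get_unmapped_area() / hugetlb_get_unmapped_area() variant stops open-coding the test `!vma || addr + len <= vma->vm_start` and calls check_heap_stack_gap(vma, addr, len) instead, so a new mapping can no longer be placed flush against the bottom of a grows-down stack. The helper's definition is not part of this diff; the sketch below only illustrates the semantics the call sites assume -- the sysctl name, its 64 KiB default, and the exact overflow handling are assumptions, not code from the patch.

#include <linux/mm.h>

/* Assumed guard-gap policy; grsecurity kernels expose a similar
 * vm.heap_stack_gap sysctl, but the default used here is a guess. */
unsigned long sysctl_heap_stack_gap __read_mostly = 64 * 1024;

/*
 * Accept the candidate range [addr, addr + len) only if it leaves a
 * guard gap below @vma when @vma is a grows-down stack.  With a gap
 * of zero this reduces exactly to the old open-coded test.
 */
bool check_heap_stack_gap(const struct vm_area_struct *vma,
			  unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)
		return true;

	if (vma->vm_flags & VM_GROWSDOWN)
		gap = sysctl_heap_stack_gap;

	/* no address below the vma can fit len plus the gap */
	if (vma->vm_start < gap + len)
		return false;

	/* addr + len + gap <= vma->vm_start, written without wrapping */
	return addr <= vma->vm_start - gap - len;
}

The same substitution appears in the top-down allocators, where the probed range starts at addr - len, hence the check_heap_stack_gap(vma, addr - len, len) calls; centralizing the comparison lets the gap policy be tuned in one place instead of per architecture.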
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 495f8be..e980fa6 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -3,7 +3,7 @@ README
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch
+Patch: 4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch b/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch
index 4ed5e67..653c257 100644
--- a/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch
+++ b/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch
@@ -50,7 +50,16 @@ diff -urNp linux-2.6.32.21/arch/alpha/kernel/module.c linux-2.6.32.21/arch/alpha
for (i = 0; i < n; i++) {
diff -urNp linux-2.6.32.21/arch/alpha/kernel/osf_sys.c linux-2.6.32.21/arch/alpha/kernel/osf_sys.c
--- linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-09-17 18:34:04.000000000 -0400
+@@ -1169,7 +1169,7 @@ arch_get_unmapped_area_1(unsigned long a
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
@@ -1205,6 +1205,10 @@ arch_get_unmapped_area(struct file *filp
merely specific addresses, but regions of memory -- perhaps
this feature should be incorporated into all ports? */
@@ -446,7 +455,7 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/fault.c linux-2.6.32.21/arch/arm/mm/fault
*
diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
--- linux-2.6.32.21/arch/arm/mm/mmap.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/arm/mm/mmap.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/arm/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400
@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
if (len > TASK_SIZE)
return -ENOMEM;
@@ -458,7 +467,13 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -75,10 +79,10 @@ arch_get_unmapped_area(struct file *filp
+@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
return addr;
}
if (len > mm->cached_hole_size) {
@@ -472,7 +487,7 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
}
full_search:
-@@ -94,8 +98,8 @@ full_search:
+@@ -94,14 +97,14 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -483,6 +498,13 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
mm->cached_hole_size = 0;
goto full_search;
}
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
diff -urNp linux-2.6.32.21/arch/arm/plat-s3c/pm.c linux-2.6.32.21/arch/arm/plat-s3c/pm.c
--- linux-2.6.32.21/arch/arm/plat-s3c/pm.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/arm/plat-s3c/pm.c 2010-09-04 15:54:51.000000000 -0400
@@ -618,6 +640,37 @@ diff -urNp linux-2.6.32.21/arch/frv/include/asm/kmap_types.h linux-2.6.32.21/arc
KM_TYPE_NR
};
+diff -urNp linux-2.6.32.21/arch/frv/mm/elf-fdpic.c linux-2.6.32.21/arch/frv/mm/elf-fdpic.c
+--- linux-2.6.32.21/arch/frv/mm/elf-fdpic.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/frv/mm/elf-fdpic.c 2010-09-17 18:34:04.000000000 -0400
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ goto success;
+ }
+
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
diff -urNp linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c
--- linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c 2010-09-04 15:54:51.000000000 -0400
@@ -1023,7 +1076,7 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.21/arch/i
.map_page = swiotlb_map_page,
diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c
--- linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-09-17 18:34:04.000000000 -0400
@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
if (REGION_NUMBER(addr) == RGN_HPAGE)
addr = 0;
@@ -1038,7 +1091,7 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64
if (!addr)
addr = mm->free_area_cache;
-@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
@@ -1050,6 +1103,12 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64
goto full_search;
}
return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
diff -urNp linux-2.6.32.21/arch/ia64/kernel/topology.c linux-2.6.32.21/arch/ia64/kernel/topology.c
--- linux-2.6.32.21/arch/ia64/kernel/topology.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/ia64/kernel/topology.c 2010-09-04 15:54:51.000000000 -0400
@@ -1126,6 +1185,18 @@ diff -urNp linux-2.6.32.21/arch/ia64/mm/fault.c linux-2.6.32.21/arch/ia64/mm/fau
survive:
/*
* If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c
+--- linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400
+@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
diff -urNp linux-2.6.32.21/arch/ia64/mm/init.c linux-2.6.32.21/arch/ia64/mm/init.c
--- linux-2.6.32.21/arch/ia64/mm/init.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/ia64/mm/init.c 2010-09-04 15:54:51.000000000 -0400
@@ -1312,8 +1383,8 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/process.c linux-2.6.32.21/arch/mips/
-}
diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/kernel/syscall.c
--- linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-09-04 15:54:51.000000000 -0400
-@@ -102,6 +102,11 @@ unsigned long arch_get_unmapped_area(str
++++ linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-09-17 18:34:04.000000000 -0400
+@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
@@ -1325,8 +1396,12 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -112,7 +117,7 @@ unsigned long arch_get_unmapped_area(str
- (!vmm || addr + len <= vmm->vm_start))
+ else
+ addr = PAGE_ALIGN(addr);
+ vmm = find_vma(current->mm, addr);
+- if (task_size - len >= addr &&
+- (!vmm || addr + len <= vmm->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
return addr;
}
- addr = TASK_UNMAPPED_BASE;
@@ -1334,6 +1409,15 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
+@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (task_size - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (do_color_align)
diff -urNp linux-2.6.32.21/arch/mips/mm/fault.c linux-2.6.32.21/arch/mips/mm/fault.c
--- linux-2.6.32.21/arch/mips/mm/fault.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/mips/mm/fault.c 2010-09-04 15:54:51.000000000 -0400
@@ -1516,7 +1600,25 @@ diff -urNp linux-2.6.32.21/arch/parisc/kernel/module.c linux-2.6.32.21/arch/pari
me->arch.unwind_section, table, end, gp);
diff -urNp linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c
--- linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-09-17 18:34:04.000000000 -0400
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
if (flags & MAP_FIXED)
return addr;
@@ -2671,8 +2773,38 @@ diff -urNp linux-2.6.32.21/arch/powerpc/mm/mmap_64.c linux-2.6.32.21/arch/powerp
}
diff -urNp linux-2.6.32.21/arch/powerpc/mm/slice.c linux-2.6.32.21/arch/powerpc/mm/slice.c
--- linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-09-04 15:54:51.000000000 -0400
-@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un
++++ linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-09-17 18:34:04.000000000 -0400
+@@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len);
+ }
+
+-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+ {
+ return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
+ 1ul << SLICE_LOW_SHIFT);
+@@ -256,7 +255,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -336,7 +335,7 @@ static unsigned long slice_find_area_top
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un
if (fixed && addr > (mm->task_size - len))
return -EINVAL;
@@ -3115,6 +3247,56 @@ diff -urNp linux-2.6.32.21/arch/sh/kernel/kgdb.c linux-2.6.32.21/arch/sh/kernel/
/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
.gdb_bpt_instr = { 0x3c, 0xc3 },
+diff -urNp linux-2.6.32.21/arch/sh/mm/mmap.c linux-2.6.32.21/arch/sh/mm/mmap.c
+--- linux-2.6.32.21/arch/sh/mm/mmap.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/sh/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -106,7 +105,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
diff -urNp linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h
--- linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h 2010-08-29 21:08:20.000000000 -0400
+++ linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h 2010-09-15 02:34:10.000000000 -0400
@@ -3669,7 +3851,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.21/arch/sp
.map_page = dma_4v_map_page,
diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c
--- linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 18:34:04.000000000 -0400
@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
if (ARCH_SUN4C && len > 0x20000000)
return -ENOMEM;
@@ -3679,9 +3861,18 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.21/arch
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr);
+@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c
--- linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 18:34:04.000000000 -0400
@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -3702,7 +3893,14 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -153,9 +157,9 @@ unsigned long arch_get_unmapped_area(str
+@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
}
if (len > mm->cached_hole_size) {
@@ -3714,7 +3912,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
mm->cached_hole_size = 0;
}
-@@ -175,8 +179,8 @@ full_search:
+@@ -175,14 +178,14 @@ full_search:
vma = find_vma(mm, VA_EXCLUDE_END);
}
if (unlikely(task_size < addr)) {
@@ -3725,7 +3923,14 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -216,7 +220,7 @@ arch_get_unmapped_area_topdown(struct fi
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
@@ -3734,7 +3939,35 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
-@@ -384,6 +388,12 @@ void arch_pick_mmap_layout(struct mm_str
+@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -279,7 +281,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -384,6 +386,12 @@ void arch_pick_mmap_layout(struct mm_str
current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -3747,7 +3980,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
-@@ -398,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
+@@ -398,6 +406,12 @@ void arch_pick_mmap_layout(struct mm_str
gap = (task_size / 6 * 5);
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
@@ -4033,8 +4266,8 @@ diff -urNp linux-2.6.32.21/arch/sparc/lib/atomic_64.S linux-2.6.32.21/arch/sparc
bne,pn %xcc, 2f
diff -urNp linux-2.6.32.21/arch/sparc/lib/ksyms.c linux-2.6.32.21/arch/sparc/lib/ksyms.c
--- linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-09-04 15:54:51.000000000 -0400
-@@ -144,12 +144,15 @@ EXPORT_SYMBOL(__downgrade_write);
++++ linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-09-17 17:45:39.000000000 -0400
+@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
/* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add);
@@ -4044,7 +4277,9 @@ diff -urNp linux-2.6.32.21/arch/sparc/lib/ksyms.c linux-2.6.32.21/arch/sparc/lib
+EXPORT_SYMBOL(atomic_sub_unchecked);
EXPORT_SYMBOL(atomic_sub_ret);
EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_unchecked);
EXPORT_SYMBOL(atomic64_sub_ret);
@@ -4969,6 +5204,46 @@ diff -urNp linux-2.6.32.21/arch/sparc/mm/fault_64.c linux-2.6.32.21/arch/sparc/m
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
+diff -urNp linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c
+--- linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400
+@@ -69,7 +69,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -126,7 +126,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -183,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
diff -urNp linux-2.6.32.21/arch/sparc/mm/init_32.c linux-2.6.32.21/arch/sparc/mm/init_32.c
--- linux-2.6.32.21/arch/sparc/mm/init_32.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/sparc/mm/init_32.c 2010-09-04 15:54:51.000000000 -0400
@@ -9393,7 +9668,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess_64.h linux-2.6.32.21/arc
#endif /* _ASM_X86_UACCESS_64_H */
diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x86/include/asm/uaccess.h
--- linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-09-16 23:14:31.000000000 -0400
@@ -8,12 +8,15 @@
#include <linux/thread_info.h>
#include <linux/prefetch.h>
@@ -9458,22 +9733,9 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
/*
* The exception table consists of pairs of addresses: the first is the
-@@ -179,17 +213,34 @@ extern int __get_user_bad(void);
- __ret_gu; \
- })
-
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __put_user_x(size, x, ptr, __ret_pu) \
-+ ({ \
-+ int __dummy; \
-+ asm volatile("call __put_user_" #size : "=a" (__ret_pu), "=c" (__dummy) \
-+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx"); \
-+ })
-+#else
- #define __put_user_x(size, x, ptr, __ret_pu) \
+@@ -183,13 +217,21 @@ extern int __get_user_bad(void);
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
-+#endif
-
+#ifdef CONFIG_X86_32
@@ -9496,7 +9758,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
-@@ -197,15 +248,18 @@ extern int __get_user_bad(void);
+@@ -197,15 +239,18 @@ extern int __get_user_bad(void);
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (err) \
@@ -9519,7 +9781,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
-@@ -374,16 +428,18 @@ do { \
+@@ -374,16 +419,18 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -9541,7 +9803,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
#define __get_user_size_ex(x, ptr, size) \
do { \
-@@ -407,10 +463,12 @@ do { \
+@@ -407,10 +454,12 @@ do { \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -9556,7 +9818,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
#define __put_user_nocheck(x, ptr, size) \
({ \
-@@ -424,13 +482,24 @@ do { \
+@@ -424,13 +473,24 @@ do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
@@ -9583,7 +9845,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
/*
* Tell gcc we read from memory instead of writing: this is because
-@@ -438,21 +507,26 @@ struct __large_struct { unsigned long bu
+@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -9614,7 +9876,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
/*
* uaccess_try and catch
-@@ -530,7 +604,7 @@ struct __large_struct { unsigned long bu
+@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
@@ -9623,7 +9885,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
} while (0)
#ifdef CONFIG_X86_WP_WORKS_OK
-@@ -567,6 +641,7 @@ extern struct movsl_mask {
+@@ -567,6 +632,7 @@ extern struct movsl_mask {
#define ARCH_HAS_NOCACHE_UACCESS 1
@@ -13721,7 +13983,26 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/signal.c linux-2.6.32.21/arch/x86/ker
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
diff -urNp linux-2.6.32.21/arch/x86/kernel/smpboot.c linux-2.6.32.21/arch/x86/kernel/smpboot.c
--- linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-09-17 17:44:35.000000000 -0400
+@@ -95,14 +95,14 @@ static DEFINE_PER_CPU(struct task_struct
+ */
+ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+-void cpu_hotplug_driver_lock()
++void cpu_hotplug_driver_lock(void)
+ {
+- mutex_lock(&x86_cpu_hotplug_driver_mutex);
++ mutex_lock(&x86_cpu_hotplug_driver_mutex);
+ }
+
+-void cpu_hotplug_driver_unlock()
++void cpu_hotplug_driver_unlock(void)
+ {
+- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
++ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+ }
+
+ ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
@@ -748,7 +748,11 @@ do_rest:
(unsigned long)task_stack_page(c_idle.idle) -
KERNEL_STACK_OFFSET + THREAD_SIZE;
@@ -13792,7 +14073,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/syscall_table_32.S linux-2.6.32.21/ar
.long sys_exit
diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c
--- linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-09-17 18:34:04.000000000 -0400
@@ -24,6 +24,21 @@
#include <asm/syscalls.h>
@@ -13815,7 +14096,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/i386 didn't use to be able to handle more than
-@@ -58,6 +73,205 @@ out:
+@@ -58,6 +73,208 @@ out:
return err;
}
@@ -13844,10 +14125,11 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
-+ vma = find_vma(mm, addr);
-+ if (pax_task_size - len >= addr &&
-+ (!vma || addr + len <= vma->vm_start))
-+ return addr;
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
@@ -13887,13 +14169,8 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ }
+ return -ENOMEM;
+ }
-+ if (!vma || addr + len <= vma->vm_start) {
-+ /*
-+ * Remember the place where we stopped the search:
-+ */
-+ mm->free_area_cache = addr + len;
-+ return addr;
-+ }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
@@ -13903,6 +14180,12 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ goto full_search;
+ }
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+}
+
+unsigned long
@@ -13938,10 +14221,11 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
-+ vma = find_vma(mm, addr);
-+ if (pax_task_size - len >= addr &&
-+ (!vma || addr + len <= vma->vm_start))
-+ return addr;
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
@@ -13956,7 +14240,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
-+ if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
@@ -13973,7 +14257,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
-+ if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
@@ -14021,7 +14305,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
struct sel_arg_struct {
unsigned long n;
-@@ -93,7 +307,7 @@ asmlinkage int sys_ipc(uint call, int fi
+@@ -93,7 +310,7 @@ asmlinkage int sys_ipc(uint call, int fi
return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
case SEMTIMEDOP:
return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
@@ -14030,7 +14314,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
case SEMGET:
return sys_semget(first, second, third);
-@@ -140,7 +354,7 @@ asmlinkage int sys_ipc(uint call, int fi
+@@ -140,7 +357,7 @@ asmlinkage int sys_ipc(uint call, int fi
ret = do_shmat(first, (char __user *) ptr, second, &raddr);
if (ret)
return ret;
@@ -14041,7 +14325,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
if (!segment_eq(get_fs(), get_ds()))
diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c
--- linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-09-17 18:34:04.000000000 -0400
@@ -32,8 +32,8 @@ out:
return error;
}
@@ -14062,7 +14346,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
*end = TASK_SIZE;
}
}
-@@ -69,11 +69,15 @@ arch_get_unmapped_area(struct file *filp
+@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
if (flags & MAP_FIXED)
return addr;
@@ -14079,7 +14363,22 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
-@@ -128,7 +132,7 @@ arch_get_unmapped_area_topdown(struct fi
+- if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+@@ -106,7 +109,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -14088,7 +14387,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -141,6 +145,10 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi
if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
goto bottomup;
@@ -14099,7 +14398,32 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
-@@ -198,13 +206,21 @@ bottomup:
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr-len;
+ }
+@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+
+@@ -198,13 +204,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -14599,22 +14923,13 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmi_32.c linux-2.6.32.21/arch/x86/ker
local_irq_save(flags);
diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S
--- linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-09-04 15:54:51.000000000 -0400
-@@ -26,6 +26,22 @@
++++ linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-09-17 17:29:28.000000000 -0400
+@@ -26,6 +26,13 @@
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>
+#include <asm/segment.h>
+
-+#undef PMD_SIZE
-+#undef PMD_SHIFT
-+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-+#define PMD_SHIFT 21
-+#else
-+#define PMD_SHIFT 22
-+#endif
-+#define PMD_SIZE (1 << PMD_SHIFT)
-+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
+#else
@@ -14623,7 +14938,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
#undef i386 /* in case the preprocessor is a 32bit one */
-@@ -34,40 +50,55 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
+@@ -34,40 +41,55 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
@@ -14689,7 +15004,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
HEAD_TEXT
#ifdef CONFIG_X86_32
. = ALIGN(PAGE_SIZE);
-@@ -82,28 +113,69 @@ SECTIONS
+@@ -82,28 +104,69 @@ SECTIONS
IRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
@@ -14766,7 +15081,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
PAGE_ALIGNED_DATA(PAGE_SIZE)
-@@ -166,12 +238,6 @@ SECTIONS
+@@ -166,12 +229,6 @@ SECTIONS
}
vgetcpu_mode = VVIRT(.vgetcpu_mode);
@@ -14779,7 +15094,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
*(.vsyscall_3)
}
-@@ -187,12 +253,19 @@ SECTIONS
+@@ -187,12 +244,19 @@ SECTIONS
#endif /* CONFIG_X86_64 */
/* Init code and data - will be freed after init */
@@ -14802,7 +15117,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
/*
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* output PHDR, so the next output section - .init.text - should
-@@ -201,12 +274,27 @@ SECTIONS
+@@ -201,12 +265,27 @@ SECTIONS
PERCPU_VADDR(0, :percpu)
#endif
@@ -14818,7 +15133,8 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
+ VMLINUX_SYMBOL(_einittext) = .;
+ . = ALIGN(PAGE_SIZE);
+ } :text.init
-+
+
+- INIT_DATA_SECTION(16)
+ /*
+ * .exit.text is discard at runtime, not link time, to deal with
+ * references from .altinstructions and .eh_frame
@@ -14828,14 +15144,13 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
+ . = ALIGN(16);
+ } :text.exit
+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
-
-- INIT_DATA_SECTION(16)
++
+ . = ALIGN(PAGE_SIZE);
+ INIT_DATA_SECTION(16) :init
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
-@@ -232,19 +320,11 @@ SECTIONS
+@@ -232,19 +311,11 @@ SECTIONS
*(.altinstr_replacement)
}
@@ -14856,7 +15171,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
PERCPU(PAGE_SIZE)
#endif
-@@ -267,12 +347,6 @@ SECTIONS
+@@ -267,12 +338,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
}
@@ -14869,7 +15184,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
/* BSS */
. = ALIGN(PAGE_SIZE);
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-@@ -288,6 +362,7 @@ SECTIONS
+@@ -288,6 +353,7 @@ SECTIONS
__brk_base = .;
. += 64 * 1024; /* 64k alignment slop space */
*(.brk_reservation) /* areas brk users have reserved */
@@ -14877,7 +15192,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
__brk_limit = .;
}
-@@ -316,13 +391,12 @@ SECTIONS
+@@ -316,13 +382,12 @@ SECTIONS
* for the boot processor.
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
@@ -18096,7 +18411,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/highmem_32.c linux-2.6.32.21/arch/x86/mm/
}
diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm/hugetlbpage.c
--- linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400
@@ -267,13 +267,18 @@ static unsigned long hugetlb_get_unmappe
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
@@ -18120,7 +18435,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
}
full_search:
-@@ -281,13 +286,13 @@ full_search:
+@@ -281,26 +286,27 @@ full_search:
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
@@ -18137,18 +18452,38 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -310,9 +315,8 @@ static unsigned long hugetlb_get_unmappe
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = ALIGN(vma->vm_end, huge_page_size(h));
+ }
++
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -309,10 +315,9 @@ static unsigned long hugetlb_get_unmappe
+ {
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev_vma;
+- struct vm_area_struct *vma, *prev_vma;
- unsigned long base = mm->mmap_base, addr = addr0;
++ struct vm_area_struct *vma;
+ unsigned long base = mm->mmap_base, addr;
unsigned long largest_hole = mm->cached_hole_size;
- int first_time = 1;
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
-@@ -322,7 +326,7 @@ static unsigned long hugetlb_get_unmappe
+@@ -322,7 +327,7 @@ static unsigned long hugetlb_get_unmappe
largest_hole = 0;
mm->free_area_cache = base;
}
@@ -18157,7 +18492,51 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
/* make sure it can fit in the remaining address space */
if (mm->free_area_cache < len)
goto fail;
-@@ -364,22 +368,26 @@ try_again:
+@@ -330,33 +335,27 @@ try_again:
+ /* either no address requested or cant fit in requested address hole */
+ addr = (mm->free_area_cache - len) & huge_page_mask(h);
+ do {
++ vma = find_vma(mm, addr);
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+- */
+- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+- return addr;
+-
+- /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+- if (addr + len <= vma->vm_start &&
+- (!prev_vma || (addr >= prev_vma->vm_end))) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+- mm->cached_hole_size = largest_hole;
+- return (mm->free_area_cache = addr);
+- } else {
+- /* pull free_area_cache down to the first hole */
+- if (mm->free_area_cache == vma->vm_end) {
+- mm->free_area_cache = vma->vm_start;
+- mm->cached_hole_size = largest_hole;
+- }
++ mm->cached_hole_size = largest_hole;
++ return (mm->free_area_cache = addr);
++ }
++ /* pull free_area_cache down to the first hole */
++ if (mm->free_area_cache == vma->vm_end) {
++ mm->free_area_cache = vma->vm_start;
++ mm->cached_hole_size = largest_hole;
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+- largest_hole = vma->vm_start - addr;
++ largest_hole = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+ addr = (vma->vm_start - len) & huge_page_mask(h);
+@@ -364,22 +363,26 @@ try_again:
fail:
/*
@@ -18195,7 +18574,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
mm->cached_hole_size = ~0UL;
addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
len, pgoff, flags);
-@@ -387,6 +395,7 @@ fail:
+@@ -387,6 +390,7 @@ fail:
/*
* Restore the topdown base:
*/
@@ -18203,7 +18582,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
mm->free_area_cache = base;
mm->cached_hole_size = ~0UL;
-@@ -400,10 +409,17 @@ hugetlb_get_unmapped_area(struct file *f
+@@ -400,10 +404,17 @@ hugetlb_get_unmapped_area(struct file *f
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -18222,15 +18601,16 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
return -ENOMEM;
if (flags & MAP_FIXED) {
-@@ -415,7 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
+@@ -415,8 +426,7 @@ hugetlb_get_unmapped_area(struct file *f
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
-+ if (pax_task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+- (!vma || addr + len <= vma->vm_start))
++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
return addr;
}
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
diff -urNp linux-2.6.32.21/arch/x86/mm/init_32.c linux-2.6.32.21/arch/x86/mm/init_32.c
--- linux-2.6.32.21/arch/x86/mm/init_32.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/x86/mm/init_32.c 2010-09-04 15:54:51.000000000 -0400
@@ -18602,7 +18982,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/init_64.c linux-2.6.32.21/arch/x86/mm/ini
return "[vsyscall]";
diff -urNp linux-2.6.32.21/arch/x86/mm/init.c linux-2.6.32.21/arch/x86/mm/init.c
--- linux-2.6.32.21/arch/x86/mm/init.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/mm/init.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/mm/init.c 2010-09-16 22:50:17.000000000 -0400
@@ -69,11 +69,7 @@ static void __init find_early_table_spac
* cause a hotspot and fill up ZONE_DMA. The page tables
* need roughly 0.5KB per GB.
@@ -18616,6 +18996,15 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/init.c linux-2.6.32.21/arch/x86/mm/init.c
e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
tables, PAGE_SIZE);
if (e820_table_start == -1UL)
+@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
+ #endif
+
+ set_nx();
+- if (nx_enabled)
++ if (nx_enabled && cpu_has_nx)
+ printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+
+ /* Enable PSE if available */
@@ -331,7 +327,13 @@ unsigned long __init_refok init_memory_m
*/
int devmem_is_allowed(unsigned long pagenr)
@@ -20213,7 +20602,7 @@ diff -urNp linux-2.6.32.21/arch/x86/vdso/vma.c linux-2.6.32.21/arch/x86/vdso/vma
-__setup("vdso=", vdso_setup);
diff -urNp linux-2.6.32.21/arch/x86/xen/enlighten.c linux-2.6.32.21/arch/x86/xen/enlighten.c
--- linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-09-17 17:30:16.000000000 -0400
@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
struct shared_info xen_dummy_shared_info;
@@ -20241,10 +20630,10 @@ diff -urNp linux-2.6.32.21/arch/x86/xen/enlighten.c linux-2.6.32.21/arch/x86/xen
- check_efer();
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
-+ (cpuid_edx(0x80000001) & (1 << (X86_FEATURE_NX & 31)))) {
++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
+ unsigned l, h;
+
-+#if defined(CONFIG_X86_32)
++#ifdef CONFIG_X86_PAE
+ nx_enabled = 1;
+#endif
+ __supported_pte_mask |= _PAGE_NX;
@@ -31772,19 +32161,6 @@ diff -urNp linux-2.6.32.21/fs/ext4/balloc.c linux-2.6.32.21/fs/ext4/balloc.c
if (free_blocks >= (nblocks + dirty_blocks))
return 1;
}
-diff -urNp linux-2.6.32.21/fs/ext4/ioctl.c linux-2.6.32.21/fs/ext4/ioctl.c
---- linux-2.6.32.21/fs/ext4/ioctl.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/fs/ext4/ioctl.c 2010-09-04 15:54:52.000000000 -0400
-@@ -230,6 +230,9 @@ setversion_out:
- struct file *donor_filp;
- int err;
-
-+ /* temporary workaround for bugs in here */
-+ return -EOPNOTSUPP;
-+
- if (!(filp->f_mode & FMODE_READ) ||
- !(filp->f_mode & FMODE_WRITE))
- return -EBADF;
diff -urNp linux-2.6.32.21/fs/ext4/namei.c linux-2.6.32.21/fs/ext4/namei.c
--- linux-2.6.32.21/fs/ext4/namei.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/fs/ext4/namei.c 2010-09-04 15:54:52.000000000 -0400
@@ -34418,7 +34794,7 @@ diff -urNp linux-2.6.32.21/fs/proc/root.c linux-2.6.32.21/fs/proc/root.c
diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
--- linux-2.6.32.21/fs/proc/task_mmu.c 2010-08-29 21:08:16.000000000 -0400
-+++ linux-2.6.32.21/fs/proc/task_mmu.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/fs/proc/task_mmu.c 2010-09-17 18:40:06.000000000 -0400
@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
@@ -34462,15 +34838,30 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
-@@ -223,13 +240,22 @@ static void show_map_vma(struct seq_file
- start += PAGE_SIZE;
+@@ -206,7 +223,6 @@ static void show_map_vma(struct seq_file
+ int flags = vma->vm_flags;
+ unsigned long ino = 0;
+ unsigned long long pgoff = 0;
+- unsigned long start;
+ dev_t dev = 0;
+ int len;
+@@ -217,19 +233,23 @@ static void show_map_vma(struct seq_file
+ pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ }
+
+- /* We don't show the stack guard page in /proc/maps */
+- start = vma->vm_start;
+- if (vma->vm_flags & VM_GROWSDOWN)
+- start += PAGE_SIZE;
+-
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+- start,
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ PAX_RAND_FLAGS(mm) ? 0UL : start,
++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
+#else
- start,
++ vma->vm_start,
vma->vm_end,
+#endif
flags & VM_READ ? 'r' : '-',
@@ -34485,7 +34876,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
MAJOR(dev), MINOR(dev), ino, &len);
/*
-@@ -238,16 +264,16 @@ static void show_map_vma(struct seq_file
+@@ -238,16 +258,16 @@ static void show_map_vma(struct seq_file
*/
if (file) {
pad_len_spaces(m, len);
@@ -34507,7 +34898,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
name = "[stack]";
}
} else {
-@@ -390,9 +416,16 @@ static int show_smap(struct seq_file *m,
+@@ -390,9 +410,16 @@ static int show_smap(struct seq_file *m,
};
memset(&mss, 0, sizeof mss);
@@ -34527,7 +34918,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
show_map_vma(m, vma);
-@@ -408,7 +441,11 @@ static int show_smap(struct seq_file *m,
+@@ -408,7 +435,11 @@ static int show_smap(struct seq_file *m,
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n",
@@ -41692,8 +42083,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_fork.c linux-2.6.32.21/grsecurity/gr
+}
diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/grsec_init.c
--- linux-2.6.32.21/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/grsec_init.c 2010-09-04 15:54:52.000000000 -0400
-@@ -0,0 +1,258 @@
++++ linux-2.6.32.21/grsecurity/grsec_init.c 2010-09-17 19:24:55.000000000 -0400
+@@ -0,0 +1,266 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
@@ -41742,6 +42133,7 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/gr
+#endif
+int grsec_lastack_retries;
+int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
+int grsec_enable_socket_all;
+int grsec_socket_all_gid;
+int grsec_enable_socket_client;
@@ -41832,6 +42224,13 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/gr
+#endif
+#endif
+
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ /* for backward compatibility, tpe_invert always defaults to on if
++ enabled in the kernel
++ */
++ grsec_enable_tpe_invert = 1;
++#endif
++
+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
+#ifndef CONFIG_GRKERNSEC_SYSCTL
+ grsec_lock = 1;
@@ -42828,8 +43227,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_sock.c linux-2.6.32.21/grsecurity/gr
+}
diff -urNp linux-2.6.32.21/grsecurity/grsec_sysctl.c linux-2.6.32.21/grsecurity/grsec_sysctl.c
--- linux-2.6.32.21/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/grsec_sysctl.c 2010-09-04 15:54:52.000000000 -0400
-@@ -0,0 +1,459 @@
++++ linux-2.6.32.21/grsecurity/grsec_sysctl.c 2010-09-17 19:22:27.000000000 -0400
+@@ -0,0 +1,469 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
@@ -43103,6 +43502,16 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_sysctl.c linux-2.6.32.21/grsecurity/
+ .proc_handler = &proc_dointvec,
+ },
+#endif
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "tpe_invert",
++ .data = &grsec_enable_tpe_invert,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+ {
+ .ctl_name = CTL_UNNUMBERED,
@@ -43328,8 +43737,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_time.c linux-2.6.32.21/grsecurity/gr
+}
diff -urNp linux-2.6.32.21/grsecurity/grsec_tpe.c linux-2.6.32.21/grsecurity/grsec_tpe.c
--- linux-2.6.32.21/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/grsec_tpe.c 2010-09-04 15:54:52.000000000 -0400
-@@ -0,0 +1,38 @@
++++ linux-2.6.32.21/grsecurity/grsec_tpe.c 2010-09-17 19:28:20.000000000 -0400
+@@ -0,0 +1,39 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
@@ -43347,7 +43756,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_tpe.c linux-2.6.32.21/grsecurity/grs
+
+ if (cred->uid && ((grsec_enable_tpe &&
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
-+ !in_group_p(grsec_tpe_gid)
++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
+#else
+ in_group_p(grsec_tpe_gid)
+#endif
@@ -43435,8 +43845,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsum.c linux-2.6.32.21/grsecurity/grsum.c
+}
diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
--- linux-2.6.32.21/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/Kconfig 2010-09-14 21:34:38.000000000 -0400
-@@ -0,0 +1,987 @@
++++ linux-2.6.32.21/grsecurity/Kconfig 2010-09-17 19:36:28.000000000 -0400
+@@ -0,0 +1,986 @@
+#
+# grecurity configuration
+#
@@ -43588,7 +43998,7 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
+ select PAX_PT_PAX_FLAGS
+ select PAX_HAVE_ACL_FLAGS
+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
-+ select PAX_MEMORY_UDEREF if (X86_32 && !XEN)
++ select PAX_MEMORY_UDEREF if (X86 && !XEN)
+ select PAX_RANDKSTACK if (X86_TSC && !X86_64)
+ select PAX_SEGMEXEC if (X86_32)
+ select PAX_PAGEEXEC
@@ -44197,11 +44607,14 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
+ is enabled, a sysctl option with name "tpe" is created.
+
+config GRKERNSEC_TPE_ALL
-+ bool "Partially restrict non-root users"
++ bool "Partially restrict all non-root users"
+ depends on GRKERNSEC_TPE
+ help
-+ If you say Y here, All non-root users other than the ones in the
-+ group specified in the main TPE option will only be allowed to
++ If you say Y here, all non-root users will be covered under
++ a weaker TPE restriction. This is separate from, and in addition to,
++ the main TPE options that you have selected elsewhere. Thus, if a
++ "trusted" GID is chosen, this restriction applies to even that GID.
++ Under this restriction, all non-root users will only be allowed to
+ execute files in directories they own that are not group or
+ world-writable, or in directories owned by root and writable only by
+ root. If the sysctl option is enabled, a sysctl option with name
@@ -44214,31 +44627,27 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
+ If you say Y here, the group you specify in the TPE configuration will
+ decide what group TPE restrictions will be *disabled* for. This
+ option is useful if you want TPE restrictions to be applied to most
-+ users on the system.
++ users on the system. If the sysctl option is enabled, a sysctl option
++ with name "tpe_invert" is created. Unlike other sysctl options, this
++ entry will default to on for backward compatibility.
+
+config GRKERNSEC_TPE_GID
+ int "GID for untrusted users"
+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+ default 1005
+ help
-+ If you have selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *disabled* for.
-+ If you have not selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *enabled* for.
-+ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+ is created.
++ Setting this GID determines what group TPE restrictions will be
++ *enabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
+
+config GRKERNSEC_TPE_GID
+ int "GID for trusted users"
+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+ default 1005
+ help
-+ If you have selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *disabled* for.
-+ If you have not selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *enabled* for.
-+ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+ is created.
++ Setting this GID determines what group TPE restrictions will be
++ *disabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
+
+endmenu
+menu "Network Protections"
@@ -46216,7 +46625,7 @@ diff -urNp linux-2.6.32.21/include/linux/grdefs.h linux-2.6.32.21/include/linux/
+#endif
diff -urNp linux-2.6.32.21/include/linux/grinternal.h linux-2.6.32.21/include/linux/grinternal.h
--- linux-2.6.32.21/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/include/linux/grinternal.h 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/include/linux/grinternal.h 2010-09-17 19:39:50.000000000 -0400
@@ -0,0 +1,211 @@
+#ifndef __GRINTERNAL_H
+#define __GRINTERNAL_H
@@ -46282,7 +46691,7 @@ diff -urNp linux-2.6.32.21/include/linux/grinternal.h linux-2.6.32.21/include/li
+extern int grsec_enable_tpe;
+extern int grsec_tpe_gid;
+extern int grsec_enable_tpe_all;
-+extern int grsec_enable_sidcaps;
++extern int grsec_enable_tpe_invert;
+extern int grsec_enable_socket_all;
+extern int grsec_socket_all_gid;
+extern int grsec_enable_socket_client;
@@ -47499,7 +47908,7 @@ diff -urNp linux-2.6.32.21/include/linux/reiserfs_fs_sb.h linux-2.6.32.21/includ
on-disk FS format */
diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/sched.h
--- linux-2.6.32.21/include/linux/sched.h 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/include/linux/sched.h 2010-09-14 18:41:02.000000000 -0400
++++ linux-2.6.32.21/include/linux/sched.h 2010-09-17 18:34:04.000000000 -0400
@@ -101,6 +101,7 @@ struct bio;
struct fs_struct;
struct bts_context;
@@ -47508,7 +47917,19 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
/*
* List of flags we want to share for kernel threads,
-@@ -667,6 +668,15 @@ struct signal_struct {
+@@ -372,9 +373,11 @@ struct user_namespace;
+ #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+
+ #include <linux/aio.h>
+
++extern bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
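check_heap_stack_gap() is the helper that converted arch_get_unmapped_area() implementations now call in place of the open-coded overlap test; the call-site pattern repeated throughout this patch (alpha earlier, mm/mmap.c below) is, as a fragment with mm, addr, len and TASK_SIZE supplied by the surrounding allocator:

	/* old form:
	 *	if (!vma || addr + len <= vma->vm_start)
	 *		return addr;
	 * new form, which also keeps a configurable gap between the
	 * candidate range and any adjacent stack vma: */
	vma = find_vma(mm, addr);
	if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
		return addr;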
+@@ -667,6 +670,15 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
@@ -47524,7 +47945,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
int oom_adj; /* OOM kill score adjustment (bit shift) */
};
-@@ -1220,7 +1230,7 @@ struct rcu_node;
+@@ -1220,7 +1232,7 @@ struct rcu_node;
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -47533,7 +47954,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
-@@ -1332,8 +1342,8 @@ struct task_struct {
+@@ -1332,8 +1344,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -47544,7 +47965,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1347,16 +1357,6 @@ struct task_struct {
+@@ -1347,16 +1359,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -47561,7 +47982,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1440,6 +1440,15 @@ struct task_struct {
+@@ -1440,6 +1442,15 @@ struct task_struct {
int hardirq_context;
int softirq_context;
#endif
@@ -47577,7 +47998,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
-@@ -1460,6 +1469,9 @@ struct task_struct {
+@@ -1460,6 +1471,9 @@ struct task_struct {
struct backing_dev_info *backing_dev_info;
@@ -47587,7 +48008,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
struct io_context *io_context;
unsigned long ptrace_message;
-@@ -1523,6 +1535,20 @@ struct task_struct {
+@@ -1523,6 +1537,20 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
@@ -47608,7 +48029,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored adress in ret_stack */
int curr_ret_stack;
-@@ -1546,6 +1572,52 @@ struct task_struct {
+@@ -1546,6 +1574,52 @@ struct task_struct {
#endif /* CONFIG_TRACING */
};
@@ -47661,7 +48082,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
-@@ -2146,7 +2218,7 @@ extern void __cleanup_sighand(struct sig
+@@ -2146,7 +2220,7 @@ extern void __cleanup_sighand(struct sig
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -47670,7 +48091,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
extern void daemonize(const char *, ...);
extern int allow_signal(int);
-@@ -2259,8 +2331,8 @@ static inline void unlock_task_sighand(s
+@@ -2259,8 +2333,8 @@ static inline void unlock_task_sighand(s
#ifndef __HAVE_THREAD_FUNCTIONS
@@ -47681,7 +48102,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
-@@ -2275,13 +2347,17 @@ static inline unsigned long *end_of_stac
+@@ -2275,13 +2349,17 @@ static inline unsigned long *end_of_stac
#endif
@@ -49315,7 +49736,7 @@ diff -urNp linux-2.6.32.21/kernel/fork.c linux-2.6.32.21/kernel/fork.c
new_fs = fs;
diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c
--- linux-2.6.32.21/kernel/futex.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/kernel/futex.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/kernel/futex.c 2010-09-17 17:43:01.000000000 -0400
@@ -54,6 +54,7 @@
#include <linux/mount.h>
#include <linux/pagemap.h>
@@ -49345,19 +49766,17 @@ diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
-@@ -2376,7 +2382,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2376,7 +2382,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
{
struct robust_list_head __user *head;
unsigned long ret;
-- const struct cred *cred = current_cred(), *pcred;
+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ const struct cred *cred = current_cred();
-+ const struct cred *pcred;
+ const struct cred *cred = current_cred(), *pcred;
+#endif
if (!futex_cmpxchg_enabled)
return -ENOSYS;
-@@ -2392,11 +2401,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2392,11 +2400,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
if (!p)
goto err_unlock;
ret = -EPERM;
@@ -49374,7 +49793,7 @@ diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c
head = p->robust_list;
rcu_read_unlock();
}
-@@ -2458,7 +2472,7 @@ retry:
+@@ -2458,7 +2471,7 @@ retry:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
@@ -50948,7 +51367,7 @@ diff -urNp linux-2.6.32.21/kernel/sys.c linux-2.6.32.21/kernel/sys.c
}
diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
--- linux-2.6.32.21/kernel/sysctl.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/kernel/sysctl.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/kernel/sysctl.c 2010-09-17 18:34:04.000000000 -0400
@@ -63,6 +63,13 @@
static int deprecated_sysctl_warning(struct __sysctl_args *args);
@@ -51018,7 +51437,21 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
{
.ctl_name = CTL_UNNUMBERED,
.procname = "sched_child_runs_first",
-@@ -1803,6 +1844,8 @@ static int do_sysctl_strategy(struct ctl
+@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
++ {
++ .procname = "heap_stack_gap",
++ .data = &sysctl_heap_stack_gap,
++ .maxlen = sizeof(sysctl_heap_stack_gap),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
+ #else
+ {
+ .ctl_name = CTL_UNNUMBERED,
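Since the new entry sits in vm_table it surfaces as /proc/sys/vm/heap_stack_gap, and mm/mmap.c below initialises it to 64*1024 bytes. A small userspace check of the current value (sketch; assumes a kernel carrying this patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long gap;
		FILE *f = fopen("/proc/sys/vm/heap_stack_gap", "r");

		if (!f) {
			perror("heap_stack_gap");  /* kernel without the patch */
			return 1;
		}
		if (fscanf(f, "%lu", &gap) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("heap/stack gap: %lu bytes\n", gap);  /* default 65536 */
		return 0;
	}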
+@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
return 0;
}
@@ -51027,7 +51460,7 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
static int parse_table(int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
-@@ -1821,7 +1864,7 @@ repeat:
+@@ -1821,7 +1871,7 @@ repeat:
if (n == table->ctl_name) {
int error;
if (table->child) {
@@ -51036,7 +51469,7 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
return -EPERM;
name++;
nlen--;
-@@ -1906,6 +1949,33 @@ int sysctl_perm(struct ctl_table_root *r
+@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
int error;
int mode;
@@ -51260,25 +51693,6 @@ diff -urNp linux-2.6.32.21/kernel/trace/ftrace.c linux-2.6.32.21/kernel/trace/ft
}
/*
-diff -urNp linux-2.6.32.21/kernel/trace/Kconfig linux-2.6.32.21/kernel/trace/Kconfig
---- linux-2.6.32.21/kernel/trace/Kconfig 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/kernel/trace/Kconfig 2010-09-04 15:54:52.000000000 -0400
-@@ -126,6 +126,7 @@ if FTRACE
- config FUNCTION_TRACER
- bool "Kernel Function Tracer"
- depends on HAVE_FUNCTION_TRACER
-+ depends on !PAX_KERNEXEC
- select FRAME_POINTER
- select KALLSYMS
- select GENERIC_TRACER
-@@ -343,6 +344,7 @@ config POWER_TRACER
- config STACK_TRACER
- bool "Trace max stack"
- depends on HAVE_FUNCTION_TRACER
-+ depends on !PAX_KERNEXEC
- select FUNCTION_TRACER
- select STACKTRACE
- select KALLSYMS
diff -urNp linux-2.6.32.21/kernel/trace/ring_buffer.c linux-2.6.32.21/kernel/trace/ring_buffer.c
--- linux-2.6.32.21/kernel/trace/ring_buffer.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/kernel/trace/ring_buffer.c 2010-09-04 15:54:52.000000000 -0400
@@ -51821,16 +52235,8 @@ diff -urNp linux-2.6.32.21/mm/madvise.c linux-2.6.32.21/mm/madvise.c
goto out;
diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
--- linux-2.6.32.21/mm/memory.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/memory.c 2010-09-04 15:54:52.000000000 -0400
-@@ -48,6 +48,7 @@
- #include <linux/ksm.h>
- #include <linux/rmap.h>
- #include <linux/module.h>
-+#include <linux/security.h>
- #include <linux/delayacct.h>
- #include <linux/init.h>
- #include <linux/writeback.h>
-@@ -187,8 +188,12 @@ static inline void free_pmd_range(struct
++++ linux-2.6.32.21/mm/memory.c 2010-09-17 18:20:06.000000000 -0400
+@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
return;
pmd = pmd_offset(pud, start);
@@ -51843,7 +52249,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -220,8 +225,12 @@ static inline void free_pud_range(struct
+@@ -220,8 +224,12 @@ static inline void free_pud_range(struct
return;
pud = pud_offset(pgd, start);
@@ -51856,7 +52262,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
}
/*
-@@ -1251,10 +1260,10 @@ int __get_user_pages(struct task_struct
+@@ -1251,10 +1259,10 @@ int __get_user_pages(struct task_struct
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
@@ -51869,7 +52275,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
if (!vma && in_gate_area(tsk, start)) {
unsigned long pg = start & PAGE_MASK;
struct vm_area_struct *gate_vma = get_gate_vma(tsk);
-@@ -1306,7 +1315,7 @@ int __get_user_pages(struct task_struct
+@@ -1306,7 +1314,7 @@ int __get_user_pages(struct task_struct
continue;
}
@@ -51878,7 +52284,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1381,7 +1390,7 @@ int __get_user_pages(struct task_struct
+@@ -1381,7 +1389,7 @@ int __get_user_pages(struct task_struct
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
@@ -51887,7 +52293,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
return i;
}
-@@ -1977,6 +1986,186 @@ static inline void cow_user_page(struct
+@@ -1977,6 +1985,186 @@ static inline void cow_user_page(struct
copy_user_highpage(dst, src, va, vma);
}
@@ -52074,7 +52480,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2156,6 +2345,12 @@ gotten:
+@@ -2156,6 +2344,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -52087,7 +52493,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter(mm, file_rss);
-@@ -2207,6 +2402,10 @@ gotten:
+@@ -2207,6 +2401,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -52098,7 +52504,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2604,6 +2803,11 @@ static int do_swap_page(struct mm_struct
+@@ -2604,6 +2802,11 @@ static int do_swap_page(struct mm_struct
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -52110,7 +52516,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock_page(page);
if (flags & FAULT_FLAG_WRITE) {
-@@ -2615,6 +2819,11 @@ static int do_swap_page(struct mm_struct
+@@ -2615,6 +2818,11 @@ static int do_swap_page(struct mm_struct
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, pte);
@@ -52122,7 +52528,41 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2665,7 +2874,7 @@ static int do_anonymous_page(struct mm_s
+@@ -2630,33 +2838,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_downwards()",
+- * except we must first make sure that 'address-PAGE_SIZE'
+- * doesn't hit another vma.
+- *
+- * The "find_vma()" will do the right thing even if we wrap
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- struct vm_area_struct *prev = vma->vm_prev;
+-
+- /*
+- * Is there a mapping abutting this one below?
+- *
+- * That's only ok if it's the same stack mapping
+- * that has gotten split..
+- */
+- if (prev && prev->vm_end == address)
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+- expand_stack(vma, address - PAGE_SIZE);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2665,27 +2846,23 @@ static int do_anonymous_page(struct mm_s
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -52131,7 +52571,31 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
spinlock_t *ptl;
pte_t entry;
-@@ -2704,6 +2913,11 @@ static int do_anonymous_page(struct mm_s
+- pte_unmap(page_table);
+-
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, address) < 0)
+- return VM_FAULT_SIGBUS;
+-
+- /* Use the zero-page for reads */
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
+ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
+@@ -2704,6 +2881,11 @@ static int do_anonymous_page(struct mm_s
if (!pte_none(*page_table))
goto release;
@@ -52143,7 +52607,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
inc_mm_counter(mm, anon_rss);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -2711,6 +2925,12 @@ setpte:
+@@ -2711,6 +2893,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, entry);
@@ -52156,7 +52620,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2853,6 +3073,12 @@ static int __do_fault(struct mm_struct *
+@@ -2853,6 +3041,12 @@ static int __do_fault(struct mm_struct *
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -52169,7 +52633,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -2872,6 +3098,14 @@ static int __do_fault(struct mm_struct *
+@@ -2872,6 +3066,14 @@ static int __do_fault(struct mm_struct *
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, entry);
@@ -52184,7 +52648,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
} else {
if (charged)
mem_cgroup_uncharge_page(page);
-@@ -3019,6 +3253,12 @@ static inline int handle_pte_fault(struc
+@@ -3019,6 +3221,12 @@ static inline int handle_pte_fault(struc
if (flags & FAULT_FLAG_WRITE)
flush_tlb_page(vma, address);
}
@@ -52197,7 +52661,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3035,6 +3275,10 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3035,6 +3243,10 @@ int handle_mm_fault(struct mm_struct *mm
pmd_t *pmd;
pte_t *pte;
@@ -52208,7 +52672,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3042,6 +3286,34 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3042,6 +3254,34 @@ int handle_mm_fault(struct mm_struct *mm
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -52243,7 +52707,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3139,7 +3411,7 @@ static int __init gate_vma_init(void)
+@@ -3139,7 +3379,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -52386,7 +52850,7 @@ diff -urNp linux-2.6.32.21/mm/migrate.c linux-2.6.32.21/mm/migrate.c
goto out;
diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
--- linux-2.6.32.21/mm/mlock.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/mlock.c 2010-09-04 15:54:56.000000000 -0400
++++ linux-2.6.32.21/mm/mlock.c 2010-09-17 18:47:09.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
@@ -52395,7 +52859,40 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
-@@ -454,6 +455,9 @@ static int do_mlock(unsigned long start,
+@@ -138,19 +139,6 @@ void munlock_vma_page(struct page *page)
+ }
+ }
+
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSDOWN) &&
+- (vma->vm_start == addr) &&
+- !vma_stack_continue(vma->vm_prev, addr);
+-}
+-
+ /**
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
+ * @vma: target vma
+@@ -183,12 +171,6 @@ static long __mlock_vma_pages_range(stru
+ if (vma->vm_flags & VM_WRITE)
+ gup_flags |= FOLL_WRITE;
+
+- /* We don't try to access the guard page of a stack vma */
+- if (stack_guard_page(vma, start)) {
+- addr += PAGE_SIZE;
+- nr_pages--;
+- }
+-
+ while (nr_pages > 0) {
+ int i;
+
+@@ -454,6 +436,9 @@ static int do_mlock(unsigned long start,
return -EINVAL;
if (end == start)
return 0;
@@ -52405,7 +52902,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
vma = find_vma_prev(current->mm, start, &prev);
if (!vma || vma->vm_start > start)
return -ENOMEM;
-@@ -464,6 +468,11 @@ static int do_mlock(unsigned long start,
+@@ -464,6 +449,11 @@ static int do_mlock(unsigned long start,
for (nstart = start ; ; ) {
unsigned int newflags;
@@ -52417,7 +52914,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
newflags = vma->vm_flags | VM_LOCKED;
-@@ -513,6 +522,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+@@ -513,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
lock_limit >>= PAGE_SHIFT;
/* check against resource limits */
@@ -52425,7 +52922,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = do_mlock(start, len, 1);
up_write(&current->mm->mmap_sem);
-@@ -534,17 +544,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
+@@ -534,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
static int do_mlockall(int flags)
{
struct vm_area_struct * vma, * prev = NULL;
@@ -52453,17 +52950,17 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
newflags = vma->vm_flags | VM_LOCKED;
if (!(flags & MCL_CURRENT))
newflags &= ~VM_LOCKED;
-@@ -576,6 +592,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+@@ -576,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
lock_limit >>= PAGE_SHIFT;
ret = -ENOMEM;
-+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
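The mlockall() hunk above also carries a unit fix: mm->total_vm counts pages while RLIMIT_MEMLOCK is accounted in bytes, so the learning hook now shifts by PAGE_SHIFT before reporting. In isolation:

	/* pages -> bytes, as the corrected gr_learn_resource() call does;
	 * PAGE_SHIFT is 12 on 4 KiB-page configurations */
	static inline unsigned long pages_to_bytes(unsigned long npages)
	{
		return npages << PAGE_SHIFT;
	}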
diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
--- linux-2.6.32.21/mm/mmap.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/mmap.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400
@@ -45,6 +45,16 @@
#define arch_rebalance_pgtables(addr, len) (addr)
#endif
@@ -52481,7 +52978,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
-@@ -70,16 +80,25 @@ static void unmap_region(struct mm_struc
+@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
*
*/
@@ -52509,7 +53006,14 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
EXPORT_SYMBOL(vm_get_page_prot);
-@@ -231,6 +250,7 @@ static struct vm_area_struct *remove_vma
+ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ struct percpu_counter vm_committed_as;
+
+ /*
+@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
struct vm_area_struct *next = vma->vm_next;
might_sleep();
@@ -52517,7 +53021,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file) {
-@@ -267,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
* not page aligned -Ram Gupta
*/
rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
@@ -52525,7 +53029,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -704,6 +725,12 @@ static int
+@@ -704,6 +726,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -52538,7 +53042,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -723,6 +750,12 @@ static int
+@@ -723,6 +751,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -52551,7 +53055,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
pgoff_t vm_pglen;
-@@ -765,12 +798,19 @@ can_vma_merge_after(struct vm_area_struc
+@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -52572,7 +53076,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -786,6 +826,15 @@ struct vm_area_struct *vma_merge(struct
+@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -52588,7 +53092,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Can it merge with the predecessor?
*/
-@@ -805,9 +854,24 @@ struct vm_area_struct *vma_merge(struct
+@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
/* cases 1, 6 */
vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -52614,7 +53118,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return prev;
}
-@@ -818,12 +882,27 @@ struct vm_area_struct *vma_merge(struct
+@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -52644,7 +53148,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return area;
}
-@@ -898,14 +977,11 @@ none:
+@@ -898,14 +978,11 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -52660,7 +53164,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->stack_vm += pages;
if (flags & (VM_RESERVED|VM_IO))
mm->reserved_vm += pages;
-@@ -932,7 +1008,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -52669,7 +53173,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -958,7 +1034,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -52678,7 +53182,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (addr & ~PAGE_MASK)
return addr;
-@@ -969,6 +1045,28 @@ unsigned long do_mmap_pgoff(struct file
+@@ -969,6 +1046,28 @@ unsigned long do_mmap_pgoff(struct file
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -52707,7 +53211,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -980,6 +1078,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -980,6 +1079,7 @@ unsigned long do_mmap_pgoff(struct file
locked += mm->locked_vm;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
@@ -52715,7 +53219,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1053,6 +1152,9 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1053,6 +1153,9 @@ unsigned long do_mmap_pgoff(struct file
if (error)
return error;
@@ -52725,7 +53229,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1065,10 +1167,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
+@@ -1065,10 +1168,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
*/
int vma_wants_writenotify(struct vm_area_struct *vma)
{
@@ -52738,7 +53242,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1117,14 +1219,24 @@ unsigned long mmap_region(struct file *f
+@@ -1117,14 +1220,24 @@ unsigned long mmap_region(struct file *f
unsigned long charged = 0;
struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
@@ -52765,7 +53269,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
/* Check against address space limit. */
-@@ -1173,6 +1285,16 @@ munmap_back:
+@@ -1173,6 +1286,16 @@ munmap_back:
goto unacct_error;
}
@@ -52782,7 +53286,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1195,6 +1317,19 @@ munmap_back:
+@@ -1195,6 +1318,19 @@ munmap_back:
error = file->f_op->mmap(file, vma);
if (error)
goto unmap_and_free_vma;
@@ -52802,7 +53306,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
-@@ -1218,6 +1353,11 @@ munmap_back:
+@@ -1218,6 +1354,11 @@ munmap_back:
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
@@ -52814,7 +53318,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* Once vma denies write, undo our temporary denial count */
if (correct_wcount)
atomic_inc(&inode->i_writecount);
-@@ -1226,6 +1366,7 @@ out:
+@@ -1226,6 +1367,7 @@ out:
mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -52822,7 +53326,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (vm_flags & VM_LOCKED) {
/*
* makes pages present; downgrades, drops, reacquires mmap_sem
-@@ -1248,6 +1389,12 @@ unmap_and_free_vma:
+@@ -1248,6 +1390,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -52835,7 +53339,41 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1281,6 +1428,10 @@ arch_get_unmapped_area(struct file *filp
+@@ -1255,6 +1403,33 @@ unacct_error:
+ return error;
+ }
+
++bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++{
++ if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++ if (addr > sysctl_heap_stack_gap)
++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++ else
++ vma = find_vma(current->mm, 0);
++ if (vma && (vma->vm_flags & VM_GROWSUP))
++ return false;
++#endif
++ return true;
++ }
++
++ if (addr + len > vma->vm_start)
++ return false;
++
++ if (vma->vm_flags & VM_GROWSDOWN)
++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
++#endif
++
++ return true;
++}
++
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
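In words: a candidate range [addr, addr+len) is rejected when it overlaps the next vma, or when that vma is a growing-down stack and fewer than sysctl_heap_stack_gap bytes would remain between the two (the symmetric VM_GROWSUP case sits behind CONFIG_STACK_GROWSUP). A worked example under the 64 KiB default:

	/* Stack vma: vm_start = 0xbf800000, VM_GROWSDOWN set, gap = 65536.
	 *
	 * Candidate addr = 0xbf7f8000, len = 0x8000:
	 *	addr + len = 0xbf800000		(no overlap, but)
	 *	vm_start - addr - len = 0 < 65536	-> rejected
	 *
	 * Candidate addr = 0xbf7e8000, len = 0x8000:
	 *	vm_start - addr - len = 0x10000 = 65536	-> accepted,
	 * leaving exactly one full gap between the mapping and the stack. */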
+@@ -1281,18 +1456,23 @@ arch_get_unmapped_area(struct file *filp
if (flags & MAP_FIXED)
return addr;
@@ -52845,9 +53383,15 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
+
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
-@@ -1289,10 +1440,10 @@ arch_get_unmapped_area(struct file *filp
- return addr;
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
}
if (len > mm->cached_hole_size) {
- start_addr = addr = mm->free_area_cache;
@@ -52860,7 +53404,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
full_search:
-@@ -1303,9 +1454,8 @@ full_search:
+@@ -1303,34 +1483,40 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -52872,7 +53416,29 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -1327,10 +1477,16 @@ full_search:
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- /*
+- * Remember the place where we stopped the search:
+- */
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+ #endif
void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
@@ -52890,7 +53456,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->free_area_cache = addr;
mm->cached_hole_size = ~0UL;
}
-@@ -1348,7 +1504,7 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1348,7 +1534,7 @@ arch_get_unmapped_area_topdown(struct fi
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -52899,7 +53465,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -1357,6 +1513,10 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1357,13 +1543,18 @@ arch_get_unmapped_area_topdown(struct fi
if (flags & MAP_FIXED)
return addr;
@@ -52910,7 +53476,37 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
-@@ -1414,13 +1574,21 @@ bottomup:
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -1378,7 +1569,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -1395,7 +1586,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
+@@ -1414,13 +1605,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -52934,7 +53530,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->cached_hole_size = ~0UL;
return addr;
-@@ -1429,6 +1597,12 @@ bottomup:
+@@ -1429,6 +1628,12 @@ bottomup:
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
@@ -52947,7 +53543,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Is this a new hole at the highest possible address?
*/
-@@ -1436,8 +1610,10 @@ void arch_unmap_area_topdown(struct mm_s
+@@ -1436,8 +1641,10 @@ void arch_unmap_area_topdown(struct mm_s
mm->free_area_cache = addr;
/* dont allow allocations above current base */
@@ -52959,7 +53555,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
unsigned long
-@@ -1545,6 +1721,27 @@ out:
+@@ -1545,6 +1752,27 @@ out:
return prev ? prev->vm_next : vma;
}
@@ -52987,7 +53583,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -1561,6 +1758,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1561,6 +1789,7 @@ static int acct_stack_growth(struct vm_a
return -ENOMEM;
/* Stack limit test */
@@ -52995,7 +53591,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (size > rlim[RLIMIT_STACK].rlim_cur)
return -ENOMEM;
-@@ -1570,6 +1768,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1570,6 +1799,7 @@ static int acct_stack_growth(struct vm_a
unsigned long limit;
locked = mm->locked_vm + grow;
limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
@@ -53003,7 +53599,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -1605,35 +1804,40 @@ static
+@@ -1605,35 +1835,42 @@ static
#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
@@ -53026,7 +53622,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
-+ if (locknext && unlikely(anon_vma_prepare(vma->vm_next)))
++ if (locknext && anon_vma_prepare(vma->vm_next))
+ return -ENOMEM;
anon_vma_lock(vma);
+ if (locknext)
@@ -53050,11 +53646,13 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* Somebody else might have raced and expanded it already */
- if (address > vma->vm_end) {
-+ if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -1643,6 +1847,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1643,6 +1880,8 @@ int expand_upwards(struct vm_area_struct
if (!error)
vma->vm_end = address;
}
@@ -53063,25 +53661,25 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
anon_vma_unlock(vma);
return error;
}
-@@ -1654,7 +1860,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1654,7 +1893,8 @@ int expand_upwards(struct vm_area_struct
static int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
- int error;
+ int error, lockprev = 0;
-+ struct vm_area_struct *prev = NULL;
++ struct vm_area_struct *prev;
/*
* We must make sure the anon_vma is allocated
-@@ -1668,6 +1875,15 @@ static int expand_downwards(struct vm_ar
+@@ -1668,6 +1908,15 @@ static int expand_downwards(struct vm_ar
if (error)
return error;
++ prev = vma->vm_prev;
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-+ find_vma_prev(vma->vm_mm, address, &prev);
+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
+#endif
-+ if (lockprev && unlikely(anon_vma_prepare(prev)))
++ if (lockprev && anon_vma_prepare(prev))
+ return -ENOMEM;
+ if (lockprev)
+ anon_vma_lock(prev);
@@ -53089,12 +53687,14 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
anon_vma_lock(vma);
/*
-@@ -1677,9 +1893,15 @@ static int expand_downwards(struct vm_ar
+@@ -1677,9 +1926,17 @@ static int expand_downwards(struct vm_ar
*/
/* Somebody else might have raced and expanded it already */
- if (address < vma->vm_start) {
-+ if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
unsigned long size, grow;
+#ifdef CONFIG_PAX_SEGMEXEC
@@ -53106,7 +53706,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -1687,9 +1909,20 @@ static int expand_downwards(struct vm_ar
+@@ -1687,9 +1944,20 @@ static int expand_downwards(struct vm_ar
if (!error) {
vma->vm_start = address;
vma->vm_pgoff -= grow;
@@ -53127,7 +53727,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return error;
}
-@@ -1765,6 +1998,13 @@ static void remove_vma_list(struct mm_st
+@@ -1765,6 +2033,13 @@ static void remove_vma_list(struct mm_st
do {
long nrpages = vma_pages(vma);
@@ -53141,7 +53741,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->total_vm -= nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma);
-@@ -1810,6 +2050,16 @@ detach_vmas_to_be_unmapped(struct mm_str
+@@ -1810,6 +2085,16 @@ detach_vmas_to_be_unmapped(struct mm_str
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -53158,7 +53758,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
rb_erase(&vma->vm_rb, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -1837,10 +2087,25 @@ int split_vma(struct mm_struct * mm, str
+@@ -1837,10 +2122,25 @@ int split_vma(struct mm_struct * mm, str
struct mempolicy *pol;
struct vm_area_struct *new;
@@ -53184,7 +53784,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -1848,6 +2113,16 @@ int split_vma(struct mm_struct * mm, str
+@@ -1848,6 +2148,16 @@ int split_vma(struct mm_struct * mm, str
if (!new)
return -ENOMEM;
@@ -53201,7 +53801,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -1858,8 +2133,29 @@ int split_vma(struct mm_struct * mm, str
+@@ -1858,8 +2168,29 @@ int split_vma(struct mm_struct * mm, str
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -53231,7 +53831,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
kmem_cache_free(vm_area_cachep, new);
return PTR_ERR(pol);
}
-@@ -1880,6 +2176,28 @@ int split_vma(struct mm_struct * mm, str
+@@ -1880,6 +2211,28 @@ int split_vma(struct mm_struct * mm, str
else
vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -53260,7 +53860,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
}
-@@ -1888,11 +2206,30 @@ int split_vma(struct mm_struct * mm, str
+@@ -1888,11 +2241,30 @@ int split_vma(struct mm_struct * mm, str
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -53291,7 +53891,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -1956,6 +2293,8 @@ int do_munmap(struct mm_struct *mm, unsi
+@@ -1956,6 +2328,8 @@ int do_munmap(struct mm_struct *mm, unsi
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -53300,7 +53900,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
}
-@@ -1968,22 +2307,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+@@ -1968,22 +2342,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
profile_munmap(addr);
@@ -53329,7 +53929,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -1997,6 +2332,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -1997,6 +2367,7 @@ unsigned long do_brk(unsigned long addr,
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -53337,7 +53937,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
len = PAGE_ALIGN(len);
if (!len)
-@@ -2008,16 +2344,30 @@ unsigned long do_brk(unsigned long addr,
+@@ -2008,16 +2379,30 @@ unsigned long do_brk(unsigned long addr,
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -53369,7 +53969,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
locked += mm->locked_vm;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
-@@ -2034,22 +2384,22 @@ unsigned long do_brk(unsigned long addr,
+@@ -2034,22 +2419,22 @@ unsigned long do_brk(unsigned long addr,
/*
* Clear old maps. this also does some error checking for us
*/
@@ -53396,7 +53996,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2063,7 +2413,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -2063,7 +2448,7 @@ unsigned long do_brk(unsigned long addr,
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -53405,7 +54005,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return -ENOMEM;
}
-@@ -2075,11 +2425,12 @@ unsigned long do_brk(unsigned long addr,
+@@ -2075,11 +2460,12 @@ unsigned long do_brk(unsigned long addr,
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
@@ -53420,7 +54020,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return addr;
}
-@@ -2126,8 +2477,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2126,8 +2512,10 @@ void exit_mmap(struct mm_struct *mm)
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
*/
@@ -53432,7 +54032,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
-@@ -2141,6 +2494,10 @@ int insert_vm_struct(struct mm_struct *
+@@ -2141,6 +2529,10 @@ int insert_vm_struct(struct mm_struct *
struct vm_area_struct * __vma, * prev;
struct rb_node ** rb_link, * rb_parent;
@@ -53443,7 +54043,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2163,7 +2520,22 @@ int insert_vm_struct(struct mm_struct *
+@@ -2163,7 +2555,22 @@ int insert_vm_struct(struct mm_struct *
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -53466,7 +54066,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
}
-@@ -2181,6 +2553,8 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2181,6 +2588,8 @@ struct vm_area_struct *copy_vma(struct v
struct rb_node **rb_link, *rb_parent;
struct mempolicy *pol;
@@ -53475,7 +54075,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2224,6 +2598,35 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2224,6 +2633,35 @@ struct vm_area_struct *copy_vma(struct v
return new_vma;
}
@@ -53511,7 +54111,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2234,7 +2637,7 @@ int may_expand_vm(struct mm_struct *mm,
+@@ -2234,7 +2672,7 @@ int may_expand_vm(struct mm_struct *mm,
unsigned long lim;
lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
@@ -53520,7 +54120,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (cur + npages > lim)
return 0;
return 1;
-@@ -2303,6 +2706,17 @@ int install_special_mapping(struct mm_st
+@@ -2303,6 +2741,17 @@ int install_special_mapping(struct mm_st
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -53540,7 +54140,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
--- linux-2.6.32.21/mm/mprotect.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/mm/mprotect.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/mm/mprotect.c 2010-09-17 18:34:04.000000000 -0400
@@ -24,10 +24,16 @@
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
@@ -53607,7 +54207,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned long newflags)
-@@ -144,6 +192,14 @@ mprotect_fixup(struct vm_area_struct *vm
+@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
int error;
int dirty_accountable = 0;
@@ -53622,7 +54222,22 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
if (newflags == oldflags) {
*pprev = vma;
return 0;
-@@ -165,6 +221,38 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+
++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++ return -ENOMEM;
++
++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++ return -ENOMEM;
++ }
++
+ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
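The get_unmapped_area() checks only police where new mappings are placed, and a MAP_FIXED request returns early from the allocator (see the mmap.c hunks above), so a PROT_NONE page can still land inside the gap and later be made accessible with mprotect(). This hunk closes that hole: making pages readable, writable or executable within sysctl_heap_stack_gap bytes of an adjacent stack vma now fails with -ENOMEM, while PROT_NONE regions may stay put. A hypothetical userspace probe (illustrative only; 'hole' stands for a PROT_NONE page previously planted just below the stack):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/mman.h>

	/* 'hole' is assumed page-aligned and inside the heap/stack gap,
	 * e.g. placed earlier with mmap(..., MAP_FIXED, ...) */
	static int gap_enforced(void *hole)
	{
		if (mprotect(hole, 4096, PROT_READ) == -1 && errno == ENOMEM) {
			puts("gap enforced: mprotect() refused");
			return 1;
		}
		return 0;
	}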
+@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
}
}
@@ -53661,7 +54276,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
/*
* First try to merge with previous and/or next vma.
*/
-@@ -195,9 +283,21 @@ success:
+@@ -195,9 +293,21 @@ success:
* vm_flags and vm_page_prot are protected by the mmap_sem
* held in write mode.
*/
@@ -53684,7 +54299,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
if (vma_wants_writenotify(vma)) {
vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-@@ -238,6 +338,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -238,6 +348,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
end = start + len;
if (end <= start)
return -ENOMEM;
@@ -53702,7 +54317,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
if (!arch_validate_prot(prot))
return -EINVAL;
-@@ -245,7 +356,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -245,7 +366,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
/*
* Does the application expect PROT_READ to imply PROT_EXEC:
*/
@@ -53711,7 +54326,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
-@@ -277,6 +388,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -277,6 +398,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
if (start > vma->vm_start)
prev = vma;
@@ -53728,7 +54343,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
for (nstart = start ; ; ) {
unsigned long newflags;
-@@ -301,6 +422,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -301,6 +432,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
if (error)
goto out;
perf_event_mmap(vma);
@@ -53843,8 +54458,16 @@ diff -urNp linux-2.6.32.21/mm/mremap.c linux-2.6.32.21/mm/mremap.c
if (ret & ~PAGE_MASK)
diff -urNp linux-2.6.32.21/mm/nommu.c linux-2.6.32.21/mm/nommu.c
--- linux-2.6.32.21/mm/nommu.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/nommu.c 2010-09-04 15:54:52.000000000 -0400
-@@ -761,15 +761,6 @@ struct vm_area_struct *find_vma(struct m
++++ linux-2.6.32.21/mm/nommu.c 2010-09-17 18:34:04.000000000 -0400
+@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+
+ atomic_long_t mmap_pages_allocated;
+
+@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
EXPORT_SYMBOL(find_vma);
/*
@@ -56492,7 +57115,7 @@ diff -urNp linux-2.6.32.21/security/integrity/ima/ima_queue.c linux-2.6.32.21/se
return 0;
diff -urNp linux-2.6.32.21/security/Kconfig linux-2.6.32.21/security/Kconfig
--- linux-2.6.32.21/security/Kconfig 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/security/Kconfig 2010-09-14 20:52:17.000000000 -0400
++++ linux-2.6.32.21/security/Kconfig 2010-09-17 17:39:35.000000000 -0400
@@ -4,6 +4,505 @@
menu "Security options"
@@ -56516,7 +57139,7 @@ diff -urNp linux-2.6.32.21/security/Kconfig linux-2.6.32.21/security/Kconfig
+
+config PAX
+ bool "Enable various PaX features"
-+ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
+ help
+ This allows you to enable various PaX features. PaX adds
+ intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.32/4425_grsec-pax-without-grsec.patch b/2.6.32/4425_grsec-pax-without-grsec.patch
index 18fa48e..578f33a 100644
--- a/2.6.32/4425_grsec-pax-without-grsec.patch
+++ b/2.6.32/4425_grsec-pax-without-grsec.patch
@@ -54,7 +54,7 @@ The original version of this patch contained no credits/description.
current->comm, task_pid_nr(current), current_uid(), current_euid());
print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
@@ -1838,10 +1842,12 @@
- #ifdef CONFIG_PAX_USERCOPY
+
void pax_report_leak_to_user(const void *ptr, unsigned long len)
{
+#ifdef CONFIG_GRKERNSEC
@@ -82,11 +82,11 @@ The original version of this patch contained no credits/description.
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -23,7 +23,7 @@
-
+
config PAX
bool "Enable various PaX features"
-- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
-+ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
+- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
++ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
help
This allows you to enable various PaX features. PaX adds
intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.32/4430_grsec-kconfig-default-gids.patch b/2.6.32/4430_grsec-kconfig-default-gids.patch
index b7a0413..7ba8aa2 100644
--- a/2.6.32/4430_grsec-kconfig-default-gids.patch
+++ b/2.6.32/4430_grsec-kconfig-default-gids.patch
@@ -29,25 +29,25 @@ from shooting themselves in the foot.
config GRKERNSEC_EXECLOG
bool "Exec logging"
-@@ -780,7 +780,7 @@
+@@ -785,7 +785,7 @@
config GRKERNSEC_TPE_GID
int "GID for untrusted users"
depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
- default 1005
+ default 100
help
- If you have selected the "Invert GID option" above, setting this
- GID determines what group TPE restrictions will be *disabled* for.
-@@ -792,7 +792,7 @@
+ Setting this GID determines what group TPE restrictions will be
+ *enabled* for. If the sysctl option is enabled, a sysctl option
+@@ -794,7 +794,7 @@
config GRKERNSEC_TPE_GID
int "GID for trusted users"
depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
- default 1005
+ default 10
help
- If you have selected the "Invert GID option" above, setting this
- GID determines what group TPE restrictions will be *disabled* for.
-@@ -866,7 +866,7 @@
+ Setting this GID determines what group TPE restrictions will be
+ *disabled* for. If the sysctl option is enabled, a sysctl option
+@@ -865,7 +865,7 @@
config GRKERNSEC_SOCKET_ALL_GID
int "GID to deny all sockets for"
depends on GRKERNSEC_SOCKET_ALL
@@ -56,7 +56,7 @@ from shooting themselves in the foot.
help
Here you can choose the GID to disable socket access for. Remember to
add the users you want socket access disabled for to the GID
-@@ -887,7 +887,7 @@
+@@ -886,7 +886,7 @@
config GRKERNSEC_SOCKET_CLIENT_GID
int "GID to deny client sockets for"
depends on GRKERNSEC_SOCKET_CLIENT
@@ -65,7 +65,7 @@ from shooting themselves in the foot.
help
Here you can choose the GID to disable client socket access for.
Remember to add the users you want client socket access disabled for to
-@@ -905,7 +905,7 @@
+@@ -904,7 +904,7 @@
config GRKERNSEC_SOCKET_SERVER_GID
int "GID to deny server sockets for"
depends on GRKERNSEC_SOCKET_SERVER
diff --git a/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch b/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch
index 9c7f7be..aa2403a 100644
--- a/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch
+++ b/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch
@@ -21,7 +21,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
--- a/grsecurity/Kconfig
+++ b/grsecurity/Kconfig
-@@ -1372,6 +1372,27 @@
+@@ -1371,6 +1371,27 @@
menu "Logging Options"
depends on GRKERNSEC
diff --git a/2.6.34/0000_README b/2.6.34/0000_README
index 596261e..64ae95b 100644
--- a/2.6.34/0000_README
+++ b/2.6.34/0000_README
@@ -3,7 +3,7 @@ README
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.2.0-2.6.34.7-201009162222.patch
+Patch: 4420_grsecurity-2.2.0-2.6.34.7-201009171945.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009162222.patch b/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009171945.patch
index 26f3ea8..07190cf 100644
--- a/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009162222.patch
+++ b/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009171945.patch
@@ -65,7 +65,16 @@ diff -urNp linux-2.6.34.7/arch/alpha/kernel/module.c linux-2.6.34.7/arch/alpha/k
for (i = 0; i < n; i++) {
diff -urNp linux-2.6.34.7/arch/alpha/kernel/osf_sys.c linux-2.6.34.7/arch/alpha/kernel/osf_sys.c
--- linux-2.6.34.7/arch/alpha/kernel/osf_sys.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/alpha/kernel/osf_sys.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/alpha/kernel/osf_sys.c 2010-09-17 18:52:03.000000000 -0400
+@@ -1170,7 +1170,7 @@ arch_get_unmapped_area_1(unsigned long a
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
@@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp
merely specific addresses, but regions of memory -- perhaps
this feature should be incorporated into all ports? */
@@ -513,7 +522,7 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/fault.c linux-2.6.34.7/arch/arm/mm/fault.c
*
diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c
--- linux-2.6.34.7/arch/arm/mm/mmap.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/arm/mm/mmap.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/arm/mm/mmap.c 2010-09-17 18:52:03.000000000 -0400
@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
if (len > TASK_SIZE)
return -ENOMEM;
@@ -525,7 +534,13 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -75,10 +79,10 @@ arch_get_unmapped_area(struct file *filp
+@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
return addr;
}
if (len > mm->cached_hole_size) {
@@ -539,7 +554,7 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c
}
full_search:
-@@ -94,8 +98,8 @@ full_search:
+@@ -94,14 +97,14 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -550,6 +565,13 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c
mm->cached_hole_size = 0;
goto full_search;
}
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
diff -urNp linux-2.6.34.7/arch/arm/plat-samsung/pm.c linux-2.6.34.7/arch/arm/plat-samsung/pm.c
--- linux-2.6.34.7/arch/arm/plat-samsung/pm.c 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/arch/arm/plat-samsung/pm.c 2010-08-13 18:38:11.000000000 -0400
@@ -706,6 +728,37 @@ diff -urNp linux-2.6.34.7/arch/frv/include/asm/kmap_types.h linux-2.6.34.7/arch/
KM_TYPE_NR
};
+diff -urNp linux-2.6.34.7/arch/frv/mm/elf-fdpic.c linux-2.6.34.7/arch/frv/mm/elf-fdpic.c
+--- linux-2.6.34.7/arch/frv/mm/elf-fdpic.c 2010-08-13 16:29:15.000000000 -0400
++++ linux-2.6.34.7/arch/frv/mm/elf-fdpic.c 2010-09-17 18:52:03.000000000 -0400
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ goto success;
+ }
+
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
diff -urNp linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c
--- linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c 2010-08-13 18:38:11.000000000 -0400
@@ -1075,7 +1128,7 @@ diff -urNp linux-2.6.34.7/arch/ia64/kernel/pci-swiotlb.c linux-2.6.34.7/arch/ia6
.map_page = swiotlb_map_page,
diff -urNp linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c
--- linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c 2010-09-17 18:52:03.000000000 -0400
@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
if (REGION_NUMBER(addr) == RGN_HPAGE)
addr = 0;
@@ -1090,7 +1143,7 @@ diff -urNp linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c linux-2.6.34.7/arch/ia64/k
if (!addr)
addr = mm->free_area_cache;
-@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
@@ -1102,6 +1155,12 @@ diff -urNp linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c linux-2.6.34.7/arch/ia64/k
goto full_search;
}
return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
diff -urNp linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S
--- linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S 2010-08-13 18:38:11.000000000 -0400
@@ -1166,6 +1225,18 @@ diff -urNp linux-2.6.34.7/arch/ia64/mm/fault.c linux-2.6.34.7/arch/ia64/mm/fault
survive:
/*
* If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c
+--- linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c 2010-08-13 16:29:15.000000000 -0400
++++ linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c 2010-09-17 18:52:03.000000000 -0400
+@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
diff -urNp linux-2.6.34.7/arch/ia64/mm/init.c linux-2.6.34.7/arch/ia64/mm/init.c
--- linux-2.6.34.7/arch/ia64/mm/init.c 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/arch/ia64/mm/init.c 2010-08-13 18:38:11.000000000 -0400
@@ -1487,8 +1558,8 @@ diff -urNp linux-2.6.34.7/arch/mips/kernel/process.c linux-2.6.34.7/arch/mips/ke
-}
diff -urNp linux-2.6.34.7/arch/mips/kernel/syscall.c linux-2.6.34.7/arch/mips/kernel/syscall.c
--- linux-2.6.34.7/arch/mips/kernel/syscall.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/mips/kernel/syscall.c 2010-08-13 18:38:11.000000000 -0400
-@@ -106,6 +106,11 @@ unsigned long arch_get_unmapped_area(str
++++ linux-2.6.34.7/arch/mips/kernel/syscall.c 2010-09-17 18:52:03.000000000 -0400
+@@ -106,17 +106,21 @@ unsigned long arch_get_unmapped_area(str
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
@@ -1500,8 +1571,12 @@ diff -urNp linux-2.6.34.7/arch/mips/kernel/syscall.c linux-2.6.34.7/arch/mips/ke
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -116,7 +121,7 @@ unsigned long arch_get_unmapped_area(str
- (!vmm || addr + len <= vmm->vm_start))
+ else
+ addr = PAGE_ALIGN(addr);
+ vmm = find_vma(current->mm, addr);
+- if (task_size - len >= addr &&
+- (!vmm || addr + len <= vmm->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
return addr;
}
- addr = TASK_UNMAPPED_BASE;
@@ -1509,6 +1584,15 @@ diff -urNp linux-2.6.34.7/arch/mips/kernel/syscall.c linux-2.6.34.7/arch/mips/ke
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
+@@ -126,7 +130,7 @@ unsigned long arch_get_unmapped_area(str
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (task_size - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (do_color_align)
diff -urNp linux-2.6.34.7/arch/mips/loongson/common/pm.c linux-2.6.34.7/arch/mips/loongson/common/pm.c
--- linux-2.6.34.7/arch/mips/loongson/common/pm.c 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/arch/mips/loongson/common/pm.c 2010-08-13 18:38:11.000000000 -0400
@@ -1703,7 +1787,25 @@ diff -urNp linux-2.6.34.7/arch/parisc/kernel/module.c linux-2.6.34.7/arch/parisc
me->arch.unwind_section, table, end, gp);
diff -urNp linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c
--- linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c 2010-09-17 18:52:03.000000000 -0400
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
if (flags & MAP_FIXED)
return addr;
@@ -2830,8 +2932,38 @@ diff -urNp linux-2.6.34.7/arch/powerpc/mm/mmap_64.c linux-2.6.34.7/arch/powerpc/
}
diff -urNp linux-2.6.34.7/arch/powerpc/mm/slice.c linux-2.6.34.7/arch/powerpc/mm/slice.c
--- linux-2.6.34.7/arch/powerpc/mm/slice.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/powerpc/mm/slice.c 2010-08-13 18:38:11.000000000 -0400
-@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un
++++ linux-2.6.34.7/arch/powerpc/mm/slice.c 2010-09-17 18:52:03.000000000 -0400
+@@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len);
+ }
+
+-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+ {
+ return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
+ 1ul << SLICE_LOW_SHIFT);
+@@ -256,7 +255,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -336,7 +335,7 @@ static unsigned long slice_find_area_top
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un
if (fixed && addr > (mm->task_size - len))
return -EINVAL;
@@ -3316,6 +3448,56 @@ diff -urNp linux-2.6.34.7/arch/sh/mm/consistent.c linux-2.6.34.7/arch/sh/mm/cons
EXPORT_SYMBOL(dma_ops);
static int __init dma_init(void)
+diff -urNp linux-2.6.34.7/arch/sh/mm/mmap.c linux-2.6.34.7/arch/sh/mm/mmap.c
+--- linux-2.6.34.7/arch/sh/mm/mmap.c 2010-08-13 16:29:15.000000000 -0400
++++ linux-2.6.34.7/arch/sh/mm/mmap.c 2010-09-17 18:52:03.000000000 -0400
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -106,7 +105,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
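
In the top-down paths above, the candidate block occupies [addr - len, addr), so the converted call sites pass the block's base to both find_vma() and the gap check, where vanilla compared the block's top against vm_start. A minimal user-space model of that call-site geometry, with simplified stand-ins for the kernel types and helpers:

#include <stdbool.h>
#include <stddef.h>

struct vm_area_struct {
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vm_next;
};

/* stand-in: first VMA with vm_end > addr, or NULL if none */
static struct vm_area_struct *find_vma(struct vm_area_struct *head,
				       unsigned long addr)
{
	for (; head; head = head->vm_next)
		if (head->vm_end > addr)
			return head;
	return NULL;
}

/* stand-in gap policy: just the legacy overlap test */
static bool check_heap_stack_gap(struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len)
{
	return !vma || addr + len <= vma->vm_start;
}

static unsigned long try_topdown(struct vm_area_struct *head,
				 unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	if (addr <= len)		/* block would underflow the space */
		return 0;
	vma = find_vma(head, addr - len);
	if (check_heap_stack_gap(vma, addr - len, len))
		return addr - len;	/* kernel caches this in free_area_cache */
	return 0;
}
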
diff -urNp linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h
--- linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h 2010-08-29 21:16:43.000000000 -0400
+++ linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h 2010-09-15 02:25:59.000000000 -0400
@@ -3873,7 +4055,7 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/pci_sun4v.c linux-2.6.34.7/arch/spar
.map_page = dma_4v_map_page,
diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c
--- linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 18:52:03.000000000 -0400
@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
if (ARCH_SUN4C && len > 0x20000000)
return -ENOMEM;
@@ -3883,9 +4065,18 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c linux-2.6.34.7/arch/s
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr);
+@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c
--- linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 18:52:03.000000000 -0400
@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -3906,7 +4097,14 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -152,9 +156,9 @@ unsigned long arch_get_unmapped_area(str
+@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
}
if (len > mm->cached_hole_size) {
@@ -3918,7 +4116,7 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s
mm->cached_hole_size = 0;
}
-@@ -174,8 +178,8 @@ full_search:
+@@ -174,14 +177,14 @@ full_search:
vma = find_vma(mm, VA_EXCLUDE_END);
}
if (unlikely(task_size < addr)) {
@@ -3929,7 +4127,14 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -215,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
@@ -3938,7 +4143,35 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
-@@ -385,6 +389,12 @@ void arch_pick_mmap_layout(struct mm_str
+@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_str
gap == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -3951,7 +4184,7 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
-@@ -397,6 +407,12 @@ void arch_pick_mmap_layout(struct mm_str
+@@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_str
gap = (task_size / 6 * 5);
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
@@ -4237,8 +4470,8 @@ diff -urNp linux-2.6.34.7/arch/sparc/lib/atomic_64.S linux-2.6.34.7/arch/sparc/l
bne,pn %xcc, 2f
diff -urNp linux-2.6.34.7/arch/sparc/lib/ksyms.c linux-2.6.34.7/arch/sparc/lib/ksyms.c
--- linux-2.6.34.7/arch/sparc/lib/ksyms.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/sparc/lib/ksyms.c 2010-08-13 18:38:11.000000000 -0400
-@@ -142,12 +142,15 @@ EXPORT_SYMBOL(__downgrade_write);
++++ linux-2.6.34.7/arch/sparc/lib/ksyms.c 2010-09-17 18:05:15.000000000 -0400
+@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
/* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add);
@@ -4248,7 +4481,9 @@ diff -urNp linux-2.6.34.7/arch/sparc/lib/ksyms.c linux-2.6.34.7/arch/sparc/lib/k
+EXPORT_SYMBOL(atomic_sub_unchecked);
EXPORT_SYMBOL(atomic_sub_ret);
EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_unchecked);
EXPORT_SYMBOL(atomic64_sub_ret);
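
The new *_unchecked exports belong to PAX_REFCOUNT: the checked atomics refuse to wrap on signed overflow (reported via the "PAX: refcount overflow" message seen earlier), while the _unchecked variants keep the vanilla wrap-around for counters where wrapping is intentional. A rough user-space illustration of the split; the real sparc implementations are assembly, and atomicity is ignored here:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* Checked add: detect signed overflow instead of wrapping (the real
 * patch raises a trap and logs the refcount-overflow report). */
static void atomic_add(int i, atomic_t *v)
{
	int sum;

	if (__builtin_add_overflow(v->counter, i, &sum)) {
		fprintf(stderr, "refcount overflow\n");
		return;
	}
	v->counter = sum;
}

/* Unchecked add: vanilla two's-complement wrap-around. */
static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	v->counter += i;
}
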
@@ -5175,6 +5410,46 @@ diff -urNp linux-2.6.34.7/arch/sparc/mm/fault_64.c linux-2.6.34.7/arch/sparc/mm/
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
+diff -urNp linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c
+--- linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c 2010-08-13 16:29:15.000000000 -0400
++++ linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c 2010-09-17 18:52:03.000000000 -0400
+@@ -68,7 +68,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
diff -urNp linux-2.6.34.7/arch/sparc/mm/init_32.c linux-2.6.34.7/arch/sparc/mm/init_32.c
--- linux-2.6.34.7/arch/sparc/mm/init_32.c 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/arch/sparc/mm/init_32.c 2010-08-13 18:38:11.000000000 -0400
@@ -9336,7 +9611,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess_64.h linux-2.6.34.7/arch/
#endif /* _ASM_X86_UACCESS_64_H */
diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86/include/asm/uaccess.h
--- linux-2.6.34.7/arch/x86/include/asm/uaccess.h 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/x86/include/asm/uaccess.h 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/x86/include/asm/uaccess.h 2010-09-17 18:00:42.000000000 -0400
@@ -8,12 +8,15 @@
#include <linux/thread_info.h>
#include <linux/prefetch.h>
@@ -9401,22 +9676,9 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
/*
* The exception table consists of pairs of addresses: the first is the
-@@ -179,17 +213,34 @@ extern int __get_user_bad(void);
- __ret_gu; \
- })
-
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __put_user_x(size, x, ptr, __ret_pu) \
-+ ({ \
-+ int __dummy; \
-+ asm volatile("call __put_user_" #size : "=a" (__ret_pu), "=c" (__dummy) \
-+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx"); \
-+ })
-+#else
- #define __put_user_x(size, x, ptr, __ret_pu) \
+@@ -183,13 +217,21 @@ extern int __get_user_bad(void);
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
-+#endif
-
+#ifdef CONFIG_X86_32
@@ -9439,7 +9701,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
-@@ -197,15 +248,18 @@ extern int __get_user_bad(void);
+@@ -197,15 +239,18 @@ extern int __get_user_bad(void);
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (err) \
@@ -9462,7 +9724,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
-@@ -374,16 +428,18 @@ do { \
+@@ -374,16 +419,18 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -9484,7 +9746,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
#define __get_user_size_ex(x, ptr, size) \
do { \
-@@ -407,10 +463,12 @@ do { \
+@@ -407,10 +454,12 @@ do { \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -9499,7 +9761,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
#define __put_user_nocheck(x, ptr, size) \
({ \
-@@ -424,13 +482,24 @@ do { \
+@@ -424,13 +473,24 @@ do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
@@ -9526,7 +9788,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
/*
* Tell gcc we read from memory instead of writing: this is because
-@@ -438,21 +507,26 @@ struct __large_struct { unsigned long bu
+@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -9557,7 +9819,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
/*
* uaccess_try and catch
-@@ -530,7 +604,7 @@ struct __large_struct { unsigned long bu
+@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
@@ -9566,7 +9828,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86
} while (0)
#ifdef CONFIG_X86_WP_WORKS_OK
-@@ -567,6 +641,7 @@ extern struct movsl_mask {
+@@ -567,6 +632,7 @@ extern struct movsl_mask {
#define ARCH_HAS_NOCACHE_UACCESS 1
@@ -13542,7 +13804,26 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/signal.c linux-2.6.34.7/arch/x86/kerne
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
diff -urNp linux-2.6.34.7/arch/x86/kernel/smpboot.c linux-2.6.34.7/arch/x86/kernel/smpboot.c
--- linux-2.6.34.7/arch/x86/kernel/smpboot.c 2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/arch/x86/kernel/smpboot.c 2010-08-29 21:17:11.000000000 -0400
++++ linux-2.6.34.7/arch/x86/kernel/smpboot.c 2010-09-17 18:04:17.000000000 -0400
+@@ -98,14 +98,14 @@ static DEFINE_PER_CPU(struct task_struct
+ */
+ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+-void cpu_hotplug_driver_lock()
++void cpu_hotplug_driver_lock(void)
+ {
+- mutex_lock(&x86_cpu_hotplug_driver_mutex);
++ mutex_lock(&x86_cpu_hotplug_driver_mutex);
+ }
+
+-void cpu_hotplug_driver_unlock()
++void cpu_hotplug_driver_unlock(void)
+ {
+- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
++ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+ }
+
+ ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
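
The smpboot.c hunk is a plain C fix: `void f()` declares a function with an unchecked parameter list, whereas `void f(void)` declares one taking no arguments, so the empty-parens originals silently accepted bogus call sites. Compile-time illustration (declarations only, meant to show the type checking rather than to link):

/* With an empty parameter list the compiler checks nothing: */
void lock_old();	/* unspecified parameters (pre-C23 semantics) */
void lock_new(void);	/* exactly zero parameters */

void demo(void)
{
	lock_old(42);	/* accepted without complaint */
	lock_new();	/* lock_new(42) here would be a compile error */
}
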
@@ -780,7 +780,11 @@ do_rest:
(unsigned long)task_stack_page(c_idle.idle) -
KERNEL_STACK_OFFSET + THREAD_SIZE;
@@ -13613,8 +13894,8 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/syscall_table_32.S linux-2.6.34.7/arch
.long sys_exit
diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c
--- linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c 2010-08-13 18:38:11.000000000 -0400
-@@ -24,6 +24,221 @@
++++ linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c 2010-09-17 18:52:03.000000000 -0400
+@@ -24,6 +24,224 @@
#include <asm/syscalls.h>
@@ -13658,10 +13939,11 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
-+ vma = find_vma(mm, addr);
-+ if (pax_task_size - len >= addr &&
-+ (!vma || addr + len <= vma->vm_start))
-+ return addr;
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
@@ -13701,13 +13983,8 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/
+ }
+ return -ENOMEM;
+ }
-+ if (!vma || addr + len <= vma->vm_start) {
-+ /*
-+ * Remember the place where we stopped the search:
-+ */
-+ mm->free_area_cache = addr + len;
-+ return addr;
-+ }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
@@ -13717,6 +13994,12 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/
+ goto full_search;
+ }
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+}
+
+unsigned long
@@ -13752,10 +14035,11 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
-+ vma = find_vma(mm, addr);
-+ if (pax_task_size - len >= addr &&
-+ (!vma || addr + len <= vma->vm_start))
-+ return addr;
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
@@ -13770,7 +14054,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
-+ if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
@@ -13787,7 +14071,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
-+ if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
@@ -13838,7 +14122,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/
* end up with proper pt_regs.
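
Both sys_i386_32.c search loops are restructured the same way: the duplicated "remember the place where we stopped" success block moves below the loop and the in-loop test becomes a plain `break`, giving the gap check a single success path. The resulting bottom-up shape, as a self-contained model with the cached_hole_size bookkeeping omitted:

#include <stdbool.h>
#include <stddef.h>

struct vm_area_struct {
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vm_next;
};

static bool check_heap_stack_gap(struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len)
{
	return !vma || addr + len <= vma->vm_start;	/* gap policy elided */
}

static unsigned long bottomup(struct vm_area_struct *first_vma,
			      unsigned long addr, unsigned long len,
			      unsigned long task_size)
{
	struct vm_area_struct *vma;

	for (vma = first_vma; ; vma = vma->vm_next) {
		if (task_size - len < addr)
			return 0;			/* -ENOMEM in the kernel */
		if (check_heap_stack_gap(vma, addr, len))
			break;				/* single success exit */
		addr = vma->vm_end;
	}
	return addr;	/* caller stores addr + len as the new free_area_cache */
}
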
diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c
--- linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c 2010-09-17 18:52:03.000000000 -0400
@@ -32,8 +32,8 @@ out:
return error;
}
@@ -13859,7 +14143,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k
*end = TASK_SIZE;
}
}
-@@ -69,11 +69,15 @@ arch_get_unmapped_area(struct file *filp
+@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
if (flags & MAP_FIXED)
return addr;
@@ -13876,7 +14160,22 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
-@@ -128,7 +132,7 @@ arch_get_unmapped_area_topdown(struct fi
+- if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+@@ -106,7 +109,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -13885,7 +14184,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -141,6 +145,10 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi
if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
goto bottomup;
@@ -13896,7 +14195,32 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
-@@ -198,13 +206,21 @@ bottomup:
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr-len;
+ }
+@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+
+@@ -198,13 +204,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -17862,7 +18186,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/highmem_32.c linux-2.6.34.7/arch/x86/mm/hi
}
diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/hugetlbpage.c
--- linux-2.6.34.7/arch/x86/mm/hugetlbpage.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/x86/mm/hugetlbpage.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/x86/mm/hugetlbpage.c 2010-09-17 18:52:03.000000000 -0400
@@ -266,13 +266,18 @@ static unsigned long hugetlb_get_unmappe
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
@@ -17886,7 +18210,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h
}
full_search:
-@@ -280,13 +285,13 @@ full_search:
+@@ -280,26 +285,27 @@ full_search:
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
@@ -17903,18 +18227,38 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -309,9 +314,8 @@ static unsigned long hugetlb_get_unmappe
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = ALIGN(vma->vm_end, huge_page_size(h));
+ }
++
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -308,10 +314,9 @@ static unsigned long hugetlb_get_unmappe
+ {
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev_vma;
+- struct vm_area_struct *vma, *prev_vma;
- unsigned long base = mm->mmap_base, addr = addr0;
++ struct vm_area_struct *vma;
+ unsigned long base = mm->mmap_base, addr;
unsigned long largest_hole = mm->cached_hole_size;
- int first_time = 1;
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
-@@ -321,7 +325,7 @@ static unsigned long hugetlb_get_unmappe
+@@ -321,7 +326,7 @@ static unsigned long hugetlb_get_unmappe
largest_hole = 0;
mm->free_area_cache = base;
}
@@ -17923,7 +18267,51 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h
/* make sure it can fit in the remaining address space */
if (mm->free_area_cache < len)
goto fail;
-@@ -363,22 +367,26 @@ try_again:
+@@ -329,33 +334,27 @@ try_again:
+ /* either no address requested or can't fit in requested address hole */
+ addr = (mm->free_area_cache - len) & huge_page_mask(h);
+ do {
++ vma = find_vma(mm, addr);
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+- */
+- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+- return addr;
+-
+- /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+- if (addr + len <= vma->vm_start &&
+- (!prev_vma || (addr >= prev_vma->vm_end))) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+- mm->cached_hole_size = largest_hole;
+- return (mm->free_area_cache = addr);
+- } else {
+- /* pull free_area_cache down to the first hole */
+- if (mm->free_area_cache == vma->vm_end) {
+- mm->free_area_cache = vma->vm_start;
+- mm->cached_hole_size = largest_hole;
+- }
++ mm->cached_hole_size = largest_hole;
++ return (mm->free_area_cache = addr);
++ }
++ /* pull free_area_cache down to the first hole */
++ if (mm->free_area_cache == vma->vm_end) {
++ mm->free_area_cache = vma->vm_start;
++ mm->cached_hole_size = largest_hole;
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+- largest_hole = vma->vm_start - addr;
++ largest_hole = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+ addr = (vma->vm_start - len) & huge_page_mask(h);
+@@ -363,22 +362,26 @@ try_again:
fail:
/*
@@ -17961,7 +18349,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h
mm->cached_hole_size = ~0UL;
addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
len, pgoff, flags);
-@@ -386,6 +394,7 @@ fail:
+@@ -386,6 +389,7 @@ fail:
/*
* Restore the topdown base:
*/
@@ -17969,7 +18357,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h
mm->free_area_cache = base;
mm->cached_hole_size = ~0UL;
-@@ -399,10 +408,17 @@ hugetlb_get_unmapped_area(struct file *f
+@@ -399,10 +403,17 @@ hugetlb_get_unmapped_area(struct file *f
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -17988,15 +18376,16 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h
return -ENOMEM;
if (flags & MAP_FIXED) {
-@@ -414,7 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
+@@ -414,8 +425,7 @@ hugetlb_get_unmapped_area(struct file *f
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
-+ if (pax_task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+- (!vma || addr + len <= vma->vm_start))
++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
return addr;
}
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
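
Several x86 hunks above substitute `pax_task_size` for the TASK_SIZE constant. Under PaX's SEGMEXEC the user address space is split in half to mirror executable mappings, so bounds checks must use a per-mm limit. The recurring pattern, sketched as a kernel-context fragment; MF_PAX_SEGMEXEC and SEGMEXEC_TASK_SIZE are PaX names assumed from context rather than shown in this excerpt:

	unsigned long pax_task_size = TASK_SIZE;

#ifdef CONFIG_PAX_SEGMEXEC
	/* Half of the address space mirrors executable mappings, so only
	 * the lower half is available for ordinary allocations. */
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		pax_task_size = SEGMEXEC_TASK_SIZE;
#endif
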
diff -urNp linux-2.6.34.7/arch/x86/mm/init_32.c linux-2.6.34.7/arch/x86/mm/init_32.c
--- linux-2.6.34.7/arch/x86/mm/init_32.c 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/arch/x86/mm/init_32.c 2010-08-13 18:38:11.000000000 -0400
@@ -19938,7 +20327,7 @@ diff -urNp linux-2.6.34.7/arch/x86/vdso/vma.c linux-2.6.34.7/arch/x86/vdso/vma.c
-__setup("vdso=", vdso_setup);
diff -urNp linux-2.6.34.7/arch/x86/xen/enlighten.c linux-2.6.34.7/arch/x86/xen/enlighten.c
--- linux-2.6.34.7/arch/x86/xen/enlighten.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/arch/x86/xen/enlighten.c 2010-08-13 18:38:11.000000000 -0400
++++ linux-2.6.34.7/arch/x86/xen/enlighten.c 2010-09-17 18:02:09.000000000 -0400
@@ -74,8 +74,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
struct shared_info xen_dummy_shared_info;
@@ -19964,7 +20353,7 @@ diff -urNp linux-2.6.34.7/arch/x86/xen/enlighten.c linux-2.6.34.7/arch/x86/xen/e
- x86_configure_nx();
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
-+ (cpuid_edx(0x80000001) & (1 << (X86_FEATURE_NX & 31)))) {
++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
+ unsigned l, h;
+
+ __supported_pte_mask |= _PAGE_NX;
@@ -30530,19 +30919,6 @@ diff -urNp linux-2.6.34.7/fs/ext4/balloc.c linux-2.6.34.7/fs/ext4/balloc.c
if (free_blocks >= (nblocks + dirty_blocks))
return 1;
}
-diff -urNp linux-2.6.34.7/fs/ext4/ioctl.c linux-2.6.34.7/fs/ext4/ioctl.c
---- linux-2.6.34.7/fs/ext4/ioctl.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/fs/ext4/ioctl.c 2010-08-13 18:38:12.000000000 -0400
-@@ -230,6 +230,9 @@ setversion_out:
- struct file *donor_filp;
- int err;
-
-+ /* temporary workaround for bugs in here */
-+ return -EOPNOTSUPP;
-+
- if (!(filp->f_mode & FMODE_READ) ||
- !(filp->f_mode & FMODE_WRITE))
- return -EBADF;
diff -urNp linux-2.6.34.7/fs/ext4/namei.c linux-2.6.34.7/fs/ext4/namei.c
--- linux-2.6.34.7/fs/ext4/namei.c 2010-08-13 16:29:15.000000000 -0400
+++ linux-2.6.34.7/fs/ext4/namei.c 2010-08-13 18:38:12.000000000 -0400
@@ -33059,7 +33435,7 @@ diff -urNp linux-2.6.34.7/fs/proc/root.c linux-2.6.34.7/fs/proc/root.c
diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c
--- linux-2.6.34.7/fs/proc/task_mmu.c 2010-08-29 21:16:40.000000000 -0400
-+++ linux-2.6.34.7/fs/proc/task_mmu.c 2010-08-13 18:39:46.000000000 -0400
++++ linux-2.6.34.7/fs/proc/task_mmu.c 2010-09-17 18:39:47.000000000 -0400
@@ -49,8 +49,13 @@ void task_mem(struct seq_file *m, struct
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
@@ -33104,22 +33480,30 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
-@@ -221,19 +238,29 @@ static void show_map_vma(struct seq_file
+@@ -210,7 +227,6 @@ static void show_map_vma(struct seq_file
+ int flags = vma->vm_flags;
+ unsigned long ino = 0;
+ unsigned long long pgoff = 0;
+- unsigned long start;
+ dev_t dev = 0;
+ int len;
+
+@@ -221,19 +237,24 @@ static void show_map_vma(struct seq_file
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
}
-+
- /* We don't show the stack guard page in /proc/maps */
- start = vma->vm_start;
- if (vma->vm_flags & VM_GROWSDOWN)
- start += PAGE_SIZE;
+- /* We don't show the stack guard page in /proc/maps */
+- start = vma->vm_start;
+- if (vma->vm_flags & VM_GROWSDOWN)
+- start += PAGE_SIZE;
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+- start,
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ PAX_RAND_FLAGS(mm) ? 0UL : start,
++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
+#else
- start,
++ vma->vm_start,
vma->vm_end,
+#endif
flags & VM_READ ? 'r' : '-',
@@ -33134,7 +33518,7 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c
MAJOR(dev), MINOR(dev), ino, &len);
/*
-@@ -242,16 +269,16 @@ static void show_map_vma(struct seq_file
+@@ -242,16 +263,16 @@ static void show_map_vma(struct seq_file
*/
if (file) {
pad_len_spaces(m, len);
@@ -33156,7 +33540,7 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c
name = "[stack]";
}
} else {
-@@ -393,11 +420,16 @@ static int show_smap(struct seq_file *m,
+@@ -393,11 +414,16 @@ static int show_smap(struct seq_file *m,
};
memset(&mss, 0, sizeof mss);
@@ -33178,7 +33562,7 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c
show_map_vma(m, vma);
seq_printf(m,
-@@ -412,7 +444,11 @@ static int show_smap(struct seq_file *m,
+@@ -412,7 +438,11 @@ static int show_smap(struct seq_file *m,
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n",
@@ -40282,8 +40666,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_fork.c linux-2.6.34.7/grsecurity/grse
+}
diff -urNp linux-2.6.34.7/grsecurity/grsec_init.c linux-2.6.34.7/grsecurity/grsec_init.c
--- linux-2.6.34.7/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.34.7/grsecurity/grsec_init.c 2010-08-13 18:38:12.000000000 -0400
-@@ -0,0 +1,258 @@
++++ linux-2.6.34.7/grsecurity/grsec_init.c 2010-09-17 19:44:58.000000000 -0400
+@@ -0,0 +1,266 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
@@ -40332,6 +40716,7 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_init.c linux-2.6.34.7/grsecurity/grse
+#endif
+int grsec_lastack_retries;
+int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
+int grsec_enable_socket_all;
+int grsec_socket_all_gid;
+int grsec_enable_socket_client;
@@ -40422,6 +40807,13 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_init.c linux-2.6.34.7/grsecurity/grse
+#endif
+#endif
+
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ /* for backward compatibility, tpe_invert always defaults to on if
++ enabled in the kernel
++ */
++ grsec_enable_tpe_invert = 1;
++#endif
++
+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
+#ifndef CONFIG_GRKERNSEC_SYSCTL
+ grsec_lock = 1;
@@ -41418,8 +41810,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_sock.c linux-2.6.34.7/grsecurity/grse
+}
diff -urNp linux-2.6.34.7/grsecurity/grsec_sysctl.c linux-2.6.34.7/grsecurity/grsec_sysctl.c
--- linux-2.6.34.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.34.7/grsecurity/grsec_sysctl.c 2010-08-13 18:38:12.000000000 -0400
-@@ -0,0 +1,415 @@
++++ linux-2.6.34.7/grsecurity/grsec_sysctl.c 2010-09-17 19:45:17.000000000 -0400
+@@ -0,0 +1,424 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
@@ -41668,6 +42060,15 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_sysctl.c linux-2.6.34.7/grsecurity/gr
+ .proc_handler = &proc_dointvec,
+ },
+#endif
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ {
++ .procname = "tpe_invert",
++ .data = &grsec_enable_tpe_invert,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+ {
+ .procname = "tpe_restrict_all",
@@ -41874,8 +42275,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_time.c linux-2.6.34.7/grsecurity/grse
+}
diff -urNp linux-2.6.34.7/grsecurity/grsec_tpe.c linux-2.6.34.7/grsecurity/grsec_tpe.c
--- linux-2.6.34.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.34.7/grsecurity/grsec_tpe.c 2010-08-13 18:38:12.000000000 -0400
-@@ -0,0 +1,38 @@
++++ linux-2.6.34.7/grsecurity/grsec_tpe.c 2010-09-17 19:44:58.000000000 -0400
+@@ -0,0 +1,39 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
@@ -41893,7 +42294,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_tpe.c linux-2.6.34.7/grsecurity/grsec
+
+ if (cred->uid && ((grsec_enable_tpe &&
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
-+ !in_group_p(grsec_tpe_gid)
++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
+#else
+ in_group_p(grsec_tpe_gid)
+#endif
@@ -41981,8 +42383,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsum.c linux-2.6.34.7/grsecurity/grsum.c
+}
diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig
--- linux-2.6.34.7/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.34.7/grsecurity/Kconfig 2010-09-15 02:12:22.000000000 -0400
-@@ -0,0 +1,987 @@
++++ linux-2.6.34.7/grsecurity/Kconfig 2010-09-17 19:44:58.000000000 -0400
+@@ -0,0 +1,986 @@
+#
+# grecurity configuration
+#
@@ -42134,7 +42536,7 @@ diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig
+ select PAX_PT_PAX_FLAGS
+ select PAX_HAVE_ACL_FLAGS
+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
-+ select PAX_MEMORY_UDEREF if (X86_32 && !XEN)
++ select PAX_MEMORY_UDEREF if (X86 && !XEN)
+ select PAX_RANDKSTACK if (X86_TSC && !X86_64)
+ select PAX_SEGMEXEC if (X86_32)
+ select PAX_PAGEEXEC
@@ -42743,11 +43145,14 @@ diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig
+ is enabled, a sysctl option with name "tpe" is created.
+
+config GRKERNSEC_TPE_ALL
-+ bool "Partially restrict non-root users"
++ bool "Partially restrict all non-root users"
+ depends on GRKERNSEC_TPE
+ help
-+ If you say Y here, All non-root users other than the ones in the
-+ group specified in the main TPE option will only be allowed to
++ If you say Y here, all non-root users will be covered under
++ a weaker TPE restriction. This is separate from, and in addition to,
++ the main TPE options that you have selected elsewhere. Thus, if a
++ "trusted" GID is chosen, this restriction applies to even that GID.
++ Under this restriction, all non-root users will only be allowed to
+ execute files in directories they own that are not group or
+ world-writable, or in directories owned by root and writable only by
+ root. If the sysctl option is enabled, a sysctl option with name
@@ -42760,31 +43165,27 @@ diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig
+ If you say Y here, the group you specify in the TPE configuration will
+ decide what group TPE restrictions will be *disabled* for. This
+ option is useful if you want TPE restrictions to be applied to most
-+ users on the system.
++ users on the system. If the sysctl option is enabled, a sysctl option
++ with name "tpe_invert" is created. Unlike other sysctl options, this
++ entry will default to on for backward-compatibility.
+
+config GRKERNSEC_TPE_GID
+ int "GID for untrusted users"
+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+ default 1005
+ help
-+ If you have selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *disabled* for.
-+ If you have not selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *enabled* for.
-+ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+ is created.
++ Setting this GID determines what group TPE restrictions will be
++ *enabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
+
+config GRKERNSEC_TPE_GID
+ int "GID for trusted users"
+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+ default 1005
+ help
-+ If you have selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *disabled* for.
-+ If you have not selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *enabled* for.
-+ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+ is created.
++ Setting this GID determines what group TPE restrictions will be
++ *disabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
+
+endmenu
+menu "Network Protections"
@@ -44710,7 +45111,7 @@ diff -urNp linux-2.6.34.7/include/linux/grdefs.h linux-2.6.34.7/include/linux/gr
+#endif
diff -urNp linux-2.6.34.7/include/linux/grinternal.h linux-2.6.34.7/include/linux/grinternal.h
--- linux-2.6.34.7/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.34.7/include/linux/grinternal.h 2010-08-13 18:38:12.000000000 -0400
++++ linux-2.6.34.7/include/linux/grinternal.h 2010-09-17 19:44:58.000000000 -0400
@@ -0,0 +1,211 @@
+#ifndef __GRINTERNAL_H
+#define __GRINTERNAL_H
@@ -44776,7 +45177,7 @@ diff -urNp linux-2.6.34.7/include/linux/grinternal.h linux-2.6.34.7/include/linu
+extern int grsec_enable_tpe;
+extern int grsec_tpe_gid;
+extern int grsec_enable_tpe_all;
-+extern int grsec_enable_sidcaps;
++extern int grsec_enable_tpe_invert;
+extern int grsec_enable_socket_all;
+extern int grsec_socket_all_gid;
+extern int grsec_enable_socket_client;
@@ -45963,7 +46364,7 @@ diff -urNp linux-2.6.34.7/include/linux/rmap.h linux-2.6.34.7/include/linux/rmap
diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sched.h
--- linux-2.6.34.7/include/linux/sched.h 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/include/linux/sched.h 2010-09-15 02:12:09.000000000 -0400
++++ linux-2.6.34.7/include/linux/sched.h 2010-09-17 18:52:03.000000000 -0400
@@ -101,6 +101,7 @@ struct bio_list;
struct fs_struct;
struct bts_context;
@@ -45972,7 +46373,20 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
/*
* List of flags we want to share for kernel threads,
-@@ -628,6 +629,15 @@ struct signal_struct {
+@@ -382,10 +383,12 @@ struct user_namespace;
+ #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+
+ #include <linux/aio.h>
+
+ #ifdef CONFIG_MMU
++extern bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len);
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
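
These two declarations formalize the pattern used at every call site above: a bounds test of the form `task_size - len >= addr` followed by the gap check. The subtractive form matters because these are unsigned quantities; a self-contained demonstration of why `addr + len <= task_size` would be the wrong way around:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long task_size = 0xC0000000UL;
	unsigned long addr = ULONG_MAX - 0x1000UL + 1;	/* pathological hint */
	unsigned long len = 0x2000UL;

	/* addr + len wraps past zero, so the naive test wrongly accepts: */
	printf("wrapping form accepts:    %d\n", addr + len <= task_size);
	/* the subtractive form cannot wrap and correctly rejects: */
	printf("subtractive form accepts: %d\n", task_size - len >= addr);
	return 0;
}
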
+@@ -628,6 +631,15 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
@@ -45988,7 +46402,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
int oom_adj; /* OOM kill score adjustment (bit shift) */
};
-@@ -1169,7 +1179,7 @@ struct rcu_node;
+@@ -1169,7 +1181,7 @@ struct rcu_node;
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -45997,7 +46411,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
-@@ -1283,8 +1293,8 @@ struct task_struct {
+@@ -1283,8 +1295,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -46008,7 +46422,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1300,16 +1310,6 @@ struct task_struct {
+@@ -1300,16 +1312,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -46025,7 +46439,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1393,6 +1393,15 @@ struct task_struct {
+@@ -1393,6 +1395,15 @@ struct task_struct {
int softirqs_enabled;
int softirq_context;
#endif
@@ -46041,7 +46455,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
-@@ -1413,6 +1422,9 @@ struct task_struct {
+@@ -1413,6 +1424,9 @@ struct task_struct {
struct backing_dev_info *backing_dev_info;
@@ -46051,7 +46465,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
struct io_context *io_context;
unsigned long ptrace_message;
-@@ -1476,6 +1488,20 @@ struct task_struct {
+@@ -1476,6 +1490,20 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
@@ -46072,7 +46486,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
-@@ -1507,6 +1533,52 @@ struct task_struct {
+@@ -1507,6 +1535,52 @@ struct task_struct {
#endif
};
@@ -46125,7 +46539,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -2108,7 +2180,7 @@ extern void __cleanup_sighand(struct sig
+@@ -2108,7 +2182,7 @@ extern void __cleanup_sighand(struct sig
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -46134,7 +46548,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
extern void daemonize(const char *, ...);
extern int allow_signal(int);
-@@ -2221,8 +2293,8 @@ static inline void unlock_task_sighand(s
+@@ -2221,8 +2295,8 @@ static inline void unlock_task_sighand(s
#ifndef __HAVE_THREAD_FUNCTIONS
@@ -46145,7 +46559,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
-@@ -2237,13 +2309,17 @@ static inline unsigned long *end_of_stac
+@@ -2237,13 +2311,17 @@ static inline unsigned long *end_of_stac
#endif
@@ -47932,7 +48346,7 @@ diff -urNp linux-2.6.34.7/kernel/fork.c linux-2.6.34.7/kernel/fork.c
new_fs = fs;
diff -urNp linux-2.6.34.7/kernel/futex.c linux-2.6.34.7/kernel/futex.c
--- linux-2.6.34.7/kernel/futex.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/kernel/futex.c 2010-08-13 18:38:12.000000000 -0400
++++ linux-2.6.34.7/kernel/futex.c 2010-09-17 17:43:22.000000000 -0400
@@ -54,6 +54,7 @@
#include <linux/mount.h>
#include <linux/pagemap.h>
@@ -47962,19 +48376,17 @@ diff -urNp linux-2.6.34.7/kernel/futex.c linux-2.6.34.7/kernel/futex.c
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
-@@ -2376,7 +2382,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2376,7 +2382,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
{
struct robust_list_head __user *head;
unsigned long ret;
-- const struct cred *cred = current_cred(), *pcred;
+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ const struct cred *cred = current_cred();
-+ const struct cred *pcred;
+ const struct cred *cred = current_cred(), *pcred;
+#endif
if (!futex_cmpxchg_enabled)
return -ENOSYS;
-@@ -2392,11 +2401,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2392,11 +2400,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
if (!p)
goto err_unlock;
ret = -EPERM;
@@ -47991,7 +48403,7 @@ diff -urNp linux-2.6.34.7/kernel/futex.c linux-2.6.34.7/kernel/futex.c
head = p->robust_list;
rcu_read_unlock();
}
-@@ -2458,7 +2472,7 @@ retry:
+@@ -2458,7 +2471,7 @@ retry:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
@@ -49462,7 +49874,7 @@ diff -urNp linux-2.6.34.7/kernel/sys.c linux-2.6.34.7/kernel/sys.c
}
diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
--- linux-2.6.34.7/kernel/sysctl.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/kernel/sysctl.c 2010-08-13 18:38:12.000000000 -0400
++++ linux-2.6.34.7/kernel/sysctl.c 2010-09-17 18:52:03.000000000 -0400
@@ -76,6 +76,13 @@
@@ -49529,7 +49941,21 @@ diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
-@@ -1630,6 +1668,16 @@ int sysctl_perm(struct ctl_table_root *r
+@@ -1124,6 +1162,13 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
++ {
++ .procname = "heap_stack_gap",
++ .data = &sysctl_heap_stack_gap,
++ .maxlen = sizeof(sysctl_heap_stack_gap),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
+ #else
+ {
+ .procname = "nr_trim_pages",
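The ctl_table entry added above surfaces the new tunable as /proc/sys/vm/heap_stack_gap (an unsigned long, in bytes, handled by proc_doulongvec_minmax). A minimal userspace sketch of reading and setting it, assuming a kernel carrying this patch; the program below is illustrative only and not part of the patchset:

/* Sketch: read and update vm.heap_stack_gap from userspace.
 * The path comes from the ctl_table entry above; writing needs root. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned long gap;
    FILE *f = fopen("/proc/sys/vm/heap_stack_gap", "r");

    if (!f) {
        perror("heap_stack_gap not available");
        return EXIT_FAILURE;
    }
    if (fscanf(f, "%lu", &gap) == 1)
        printf("current heap/stack gap: %lu bytes\n", gap);
    fclose(f);

    /* Equivalent to `sysctl -w vm.heap_stack_gap=65536`. */
    f = fopen("/proc/sys/vm/heap_stack_gap", "w");
    if (f) {
        fprintf(f, "%lu\n", 65536UL);
        fclose(f);
    }
    return EXIT_SUCCESS;
}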
+@@ -1630,6 +1675,16 @@ int sysctl_perm(struct ctl_table_root *r
int error;
int mode;
@@ -49546,7 +49972,7 @@ diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
if (error)
return error;
-@@ -2138,6 +2186,8 @@ static int __do_proc_dointvec(void *tbl_
+@@ -2138,6 +2193,8 @@ static int __do_proc_dointvec(void *tbl_
len = strlen(buf);
if (len > left)
len = left;
@@ -49555,7 +49981,7 @@ diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
if(copy_to_user(s, buf, len))
return -EFAULT;
left -= len;
-@@ -2363,6 +2413,8 @@ static int __do_proc_doulongvec_minmax(v
+@@ -2363,6 +2420,8 @@ static int __do_proc_doulongvec_minmax(v
len = strlen(buf);
if (len > left)
len = left;
@@ -49754,25 +50180,6 @@ diff -urNp linux-2.6.34.7/kernel/trace/ftrace.c linux-2.6.34.7/kernel/trace/ftra
}
/*
-diff -urNp linux-2.6.34.7/kernel/trace/Kconfig linux-2.6.34.7/kernel/trace/Kconfig
---- linux-2.6.34.7/kernel/trace/Kconfig 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/kernel/trace/Kconfig 2010-08-13 18:38:12.000000000 -0400
-@@ -124,6 +124,7 @@ if FTRACE
- config FUNCTION_TRACER
- bool "Kernel Function Tracer"
- depends on HAVE_FUNCTION_TRACER
-+ depends on !PAX_KERNEXEC
- select FRAME_POINTER
- select KALLSYMS
- select GENERIC_TRACER
-@@ -353,6 +354,7 @@ config PROFILE_KSYM_TRACER
- config STACK_TRACER
- bool "Trace max stack"
- depends on HAVE_FUNCTION_TRACER
-+ depends on !PAX_KERNEXEC
- select FUNCTION_TRACER
- select STACKTRACE
- select KALLSYMS
diff -urNp linux-2.6.34.7/kernel/trace/ring_buffer.c linux-2.6.34.7/kernel/trace/ring_buffer.c
--- linux-2.6.34.7/kernel/trace/ring_buffer.c 2010-08-29 21:16:43.000000000 -0400
+++ linux-2.6.34.7/kernel/trace/ring_buffer.c 2010-08-29 21:17:12.000000000 -0400
@@ -50272,16 +50679,8 @@ diff -urNp linux-2.6.34.7/mm/madvise.c linux-2.6.34.7/mm/madvise.c
goto out;
diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
--- linux-2.6.34.7/mm/memory.c 2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/memory.c 2010-08-29 21:17:12.000000000 -0400
-@@ -48,6 +48,7 @@
- #include <linux/ksm.h>
- #include <linux/rmap.h>
- #include <linux/module.h>
-+#include <linux/security.h>
- #include <linux/delayacct.h>
- #include <linux/init.h>
- #include <linux/writeback.h>
-@@ -259,8 +260,12 @@ static inline void free_pmd_range(struct
++++ linux-2.6.34.7/mm/memory.c 2010-09-17 18:41:42.000000000 -0400
+@@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
return;
pmd = pmd_offset(pud, start);
@@ -50294,7 +50693,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -292,8 +297,12 @@ static inline void free_pud_range(struct
+@@ -292,8 +296,12 @@ static inline void free_pud_range(struct
return;
pud = pud_offset(pgd, start);
@@ -50307,7 +50706,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
}
/*
-@@ -1354,10 +1363,10 @@ int __get_user_pages(struct task_struct
+@@ -1354,10 +1362,10 @@ int __get_user_pages(struct task_struct
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
@@ -50320,7 +50719,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
if (!vma && in_gate_area(tsk, start)) {
unsigned long pg = start & PAGE_MASK;
struct vm_area_struct *gate_vma = get_gate_vma(tsk);
-@@ -1409,7 +1418,7 @@ int __get_user_pages(struct task_struct
+@@ -1409,7 +1417,7 @@ int __get_user_pages(struct task_struct
continue;
}
@@ -50329,7 +50728,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1484,7 +1493,7 @@ int __get_user_pages(struct task_struct
+@@ -1484,7 +1492,7 @@ int __get_user_pages(struct task_struct
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
@@ -50338,7 +50737,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
return i;
}
-@@ -2080,6 +2089,186 @@ static inline void cow_user_page(struct
+@@ -2080,6 +2088,186 @@ static inline void cow_user_page(struct
copy_user_highpage(dst, src, va, vma);
}
@@ -50525,7 +50924,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2266,6 +2455,12 @@ gotten:
+@@ -2266,6 +2454,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -50538,7 +50937,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2317,6 +2512,10 @@ gotten:
+@@ -2317,6 +2511,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -50549,7 +50948,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2725,6 +2924,11 @@ static int do_swap_page(struct mm_struct
+@@ -2725,6 +2923,11 @@ static int do_swap_page(struct mm_struct
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -50561,7 +50960,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
unlock_page(page);
if (flags & FAULT_FLAG_WRITE) {
-@@ -2736,6 +2940,11 @@ static int do_swap_page(struct mm_struct
+@@ -2736,6 +2939,11 @@ static int do_swap_page(struct mm_struct
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -50573,7 +50972,41 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2786,7 +2995,7 @@ static int do_anonymous_page(struct mm_s
+@@ -2751,33 +2959,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_downwards()",
+- * except we must first make sure that 'address-PAGE_SIZE'
+- * doesn't hit another vma.
+- *
+- * The "find_vma()" will do the right thing even if we wrap
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- struct vm_area_struct *prev = vma->vm_prev;
+-
+- /*
+- * Is there a mapping abutting this one below?
+- *
+- * That's only ok if it's the same stack mapping
+- * that has gotten split..
+- */
+- if (prev && prev->vm_end == address)
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+- expand_stack(vma, address - PAGE_SIZE);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
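The two hunks above drop the fault-time guard logic (check_stack_guard_page() and its expand_stack() call); under this patch the spacing policy moves to map time via check_heap_stack_gap(), defined later in mm/mmap.c. A hedged userspace probe of the visible difference, assuming a [stack] line in /proc/self/maps and the default vm.heap_stack_gap; MAP_FIXED would bypass the check (arch_get_unmapped_area returns fixed addresses untouched), so the probe uses a hint:

/* Hypothetical probe: request an anonymous page one page below the
 * stack vma. On a kernel with this patch the hint falls inside the
 * gap, so the mapping is expected to be relocated (got != hint);
 * on a stock kernel the hint can be honoured. Sketch, not a test. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    FILE *maps = fopen("/proc/self/maps", "r");
    char line[256];
    unsigned long lo = 0, hi = 0;

    while (maps && fgets(line, sizeof(line), maps)) {
        if (strstr(line, "[stack]")) {
            sscanf(line, "%lx-%lx", &lo, &hi);
            break;
        }
    }
    if (maps)
        fclose(maps);
    if (!lo)
        return 1;

    void *hint = (void *)(lo - 4096);
    void *got = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    printf("hint %p -> got %p\n", hint, got);
    return 0;
}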
+@@ -2786,27 +2967,23 @@ static int do_anonymous_page(struct mm_s
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -50582,7 +51015,31 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
spinlock_t *ptl;
pte_t entry;
-@@ -2825,6 +3034,11 @@ static int do_anonymous_page(struct mm_s
+- pte_unmap(page_table);
+-
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, address) < 0)
+- return VM_FAULT_SIGBUS;
+-
+- /* Use the zero-page for reads */
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
+ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
+@@ -2825,6 +3002,11 @@ static int do_anonymous_page(struct mm_s
if (!pte_none(*page_table))
goto release;
@@ -50594,7 +51051,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -2832,6 +3046,12 @@ setpte:
+@@ -2832,6 +3014,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -50607,7 +51064,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2974,6 +3194,12 @@ static int __do_fault(struct mm_struct *
+@@ -2974,6 +3162,12 @@ static int __do_fault(struct mm_struct *
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -50620,7 +51077,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -2993,6 +3219,14 @@ static int __do_fault(struct mm_struct *
+@@ -2993,6 +3187,14 @@ static int __do_fault(struct mm_struct *
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, page_table);
@@ -50635,7 +51092,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
} else {
if (charged)
mem_cgroup_uncharge_page(page);
-@@ -3140,6 +3374,12 @@ static inline int handle_pte_fault(struc
+@@ -3140,6 +3342,12 @@ static inline int handle_pte_fault(struc
if (flags & FAULT_FLAG_WRITE)
flush_tlb_page(vma, address);
}
@@ -50648,7 +51105,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3156,6 +3396,10 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3156,6 +3364,10 @@ int handle_mm_fault(struct mm_struct *mm
pmd_t *pmd;
pte_t *pte;
@@ -50659,7 +51116,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3166,6 +3410,34 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3166,6 +3378,34 @@ int handle_mm_fault(struct mm_struct *mm
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -50694,7 +51151,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3263,7 +3535,7 @@ static int __init gate_vma_init(void)
+@@ -3263,7 +3503,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -50873,7 +51330,7 @@ diff -urNp linux-2.6.34.7/mm/migrate.c linux-2.6.34.7/mm/migrate.c
goto out;
diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
--- linux-2.6.34.7/mm/mlock.c 2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/mlock.c 2010-09-04 15:37:36.000000000 -0400
++++ linux-2.6.34.7/mm/mlock.c 2010-09-17 18:44:51.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
@@ -50882,7 +51339,40 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
-@@ -451,6 +452,9 @@ static int do_mlock(unsigned long start,
+@@ -135,19 +136,6 @@ void munlock_vma_page(struct page *page)
+ }
+ }
+
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSDOWN) &&
+- (vma->vm_start == addr) &&
+- !vma_stack_continue(vma->vm_prev, addr);
+-}
+-
+ /**
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
+ * @vma: target vma
+@@ -180,12 +168,6 @@ static long __mlock_vma_pages_range(stru
+ if (vma->vm_flags & VM_WRITE)
+ gup_flags |= FOLL_WRITE;
+
+- /* We don't try to access the guard page of a stack vma */
+- if (stack_guard_page(vma, start)) {
+- addr += PAGE_SIZE;
+- nr_pages--;
+- }
+-
+ while (nr_pages > 0) {
+ int i;
+
+@@ -451,6 +433,9 @@ static int do_mlock(unsigned long start,
return -EINVAL;
if (end == start)
return 0;
@@ -50892,7 +51382,7 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
vma = find_vma_prev(current->mm, start, &prev);
if (!vma || vma->vm_start > start)
return -ENOMEM;
-@@ -461,6 +465,11 @@ static int do_mlock(unsigned long start,
+@@ -461,6 +446,11 @@ static int do_mlock(unsigned long start,
for (nstart = start ; ; ) {
unsigned int newflags;
@@ -50904,7 +51394,7 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
newflags = vma->vm_flags | VM_LOCKED;
-@@ -510,6 +519,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+@@ -510,6 +500,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
lock_limit >>= PAGE_SHIFT;
/* check against resource limits */
@@ -50912,7 +51402,7 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = do_mlock(start, len, 1);
up_write(&current->mm->mmap_sem);
-@@ -531,17 +541,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
+@@ -531,17 +522,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
static int do_mlockall(int flags)
{
struct vm_area_struct * vma, * prev = NULL;
@@ -50940,17 +51430,17 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
newflags = vma->vm_flags | VM_LOCKED;
if (!(flags & MCL_CURRENT))
newflags &= ~VM_LOCKED;
-@@ -573,6 +589,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+@@ -573,6 +570,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
lock_limit >>= PAGE_SHIFT;
ret = -ENOMEM;
-+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
--- linux-2.6.34.7/mm/mmap.c 2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/mmap.c 2010-09-04 15:37:36.000000000 -0400
++++ linux-2.6.34.7/mm/mmap.c 2010-09-17 18:52:03.000000000 -0400
@@ -44,6 +44,16 @@
#define arch_rebalance_pgtables(addr, len) (addr)
#endif
@@ -50968,7 +51458,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
-@@ -69,16 +79,25 @@ static void unmap_region(struct mm_struc
+@@ -69,22 +79,32 @@ static void unmap_region(struct mm_struc
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
*
*/
@@ -50996,7 +51486,14 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
}
EXPORT_SYMBOL(vm_get_page_prot);
-@@ -230,6 +249,7 @@ static struct vm_area_struct *remove_vma
+ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ struct percpu_counter vm_committed_as;
+
+ /*
+@@ -230,6 +250,7 @@ static struct vm_area_struct *remove_vma
struct vm_area_struct *next = vma->vm_next;
might_sleep();
@@ -51004,7 +51501,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file) {
-@@ -266,6 +286,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -266,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
* not page aligned -Ram Gupta
*/
rlim = rlimit(RLIMIT_DATA);
@@ -51012,7 +51509,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -695,6 +716,12 @@ static int
+@@ -695,6 +717,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -51025,7 +51522,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -714,6 +741,12 @@ static int
+@@ -714,6 +742,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -51038,7 +51535,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
pgoff_t vm_pglen;
-@@ -756,13 +789,20 @@ can_vma_merge_after(struct vm_area_struc
+@@ -756,13 +790,20 @@ can_vma_merge_after(struct vm_area_struc
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -51060,7 +51557,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -778,6 +818,15 @@ struct vm_area_struct *vma_merge(struct
+@@ -778,6 +819,15 @@ struct vm_area_struct *vma_merge(struct
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -51076,7 +51573,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* Can it merge with the predecessor?
*/
-@@ -797,9 +846,24 @@ struct vm_area_struct *vma_merge(struct
+@@ -797,9 +847,24 @@ struct vm_area_struct *vma_merge(struct
/* cases 1, 6 */
err = vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -51102,7 +51599,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (err)
return NULL;
return prev;
-@@ -812,12 +876,27 @@ struct vm_area_struct *vma_merge(struct
+@@ -812,12 +877,27 @@ struct vm_area_struct *vma_merge(struct
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -51132,7 +51629,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (err)
return NULL;
return area;
-@@ -932,14 +1011,11 @@ none:
+@@ -932,14 +1012,11 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -51148,7 +51645,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
mm->stack_vm += pages;
if (flags & (VM_RESERVED|VM_IO))
mm->reserved_vm += pages;
-@@ -966,7 +1042,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -966,7 +1043,7 @@ unsigned long do_mmap_pgoff(struct file
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -51157,7 +51654,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -992,7 +1068,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -51166,7 +51663,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (addr & ~PAGE_MASK)
return addr;
-@@ -1003,6 +1079,28 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1003,6 +1080,28 @@ unsigned long do_mmap_pgoff(struct file
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -51195,7 +51692,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -1014,6 +1112,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1014,6 +1113,7 @@ unsigned long do_mmap_pgoff(struct file
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
@@ -51203,7 +51700,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1084,6 +1183,9 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1084,6 +1184,9 @@ unsigned long do_mmap_pgoff(struct file
if (error)
return error;
@@ -51213,7 +51710,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1160,10 +1262,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
+@@ -1160,10 +1263,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
*/
int vma_wants_writenotify(struct vm_area_struct *vma)
{
@@ -51226,7 +51723,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1212,14 +1314,24 @@ unsigned long mmap_region(struct file *f
+@@ -1212,14 +1315,24 @@ unsigned long mmap_region(struct file *f
unsigned long charged = 0;
struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
@@ -51253,7 +51750,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
}
/* Check against address space limit. */
-@@ -1268,6 +1380,16 @@ munmap_back:
+@@ -1268,6 +1381,16 @@ munmap_back:
goto unacct_error;
}
@@ -51270,7 +51767,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1291,6 +1413,19 @@ munmap_back:
+@@ -1291,6 +1414,19 @@ munmap_back:
error = file->f_op->mmap(file, vma);
if (error)
goto unmap_and_free_vma;
@@ -51290,7 +51787,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
-@@ -1326,6 +1461,11 @@ munmap_back:
+@@ -1326,6 +1462,11 @@ munmap_back:
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
@@ -51302,7 +51799,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/* Once vma denies write, undo our temporary denial count */
if (correct_wcount)
atomic_inc(&inode->i_writecount);
-@@ -1334,6 +1474,7 @@ out:
+@@ -1334,6 +1475,7 @@ out:
mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -51310,7 +51807,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (vm_flags & VM_LOCKED) {
if (!mlock_vma_pages_range(vma, addr, addr + len))
mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1351,6 +1492,12 @@ unmap_and_free_vma:
+@@ -1351,6 +1493,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -51323,7 +51820,41 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1384,6 +1531,10 @@ arch_get_unmapped_area(struct file *filp
+@@ -1358,6 +1506,33 @@ unacct_error:
+ return error;
+ }
+
++bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++{
++ if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++ if (addr > sysctl_heap_stack_gap)
++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++ else
++ vma = find_vma(current->mm, 0);
++ if (vma && (vma->vm_flags & VM_GROWSUP))
++ return false;
++#endif
++ return true;
++ }
++
++ if (addr + len > vma->vm_start)
++ return false;
++
++ if (vma->vm_flags & VM_GROWSDOWN)
++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
++#endif
++
++ return true;
++}
++
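For readability, a standalone model of the predicate just defined, covering only the common MMU branch (the CONFIG_STACK_GROWSUP handling is omitted); the struct and constant here are simplified stand-ins for the kernel's struct vm_area_struct:

/* Model of check_heap_stack_gap(): an allocation [addr, addr+len)
 * below a VM_GROWSDOWN vma passes only if it leaves at least the
 * configured gap of slack. */
#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN 0x0100UL
static unsigned long heap_stack_gap = 64 * 1024;

static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
    if (!next)                          /* nothing above: always fits */
        return true;
    if (addr + len > next->vm_start)    /* overlap: never fits */
        return false;
    if (next->vm_flags & VM_GROWSDOWN)  /* stack above: keep the gap */
        return heap_stack_gap <= next->vm_start - addr - len;
    return true;                        /* ordinary vma: touching is fine */
}

int main(void)
{
    struct vma stack = { 0x7f0000000000UL, 0x7f0000021000UL, VM_GROWSDOWN };
    printf("%d\n", gap_ok(&stack, stack.vm_start - 0x20000, 0x10000)); /* 1 */
    printf("%d\n", gap_ok(&stack, stack.vm_start - 0x11000, 0x10000)); /* 0 */
    return 0;
}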
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
+@@ -1384,18 +1559,23 @@ arch_get_unmapped_area(struct file *filp
if (flags & MAP_FIXED)
return addr;
@@ -51333,9 +51864,15 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
+
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
-@@ -1392,10 +1543,10 @@ arch_get_unmapped_area(struct file *filp
- return addr;
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
}
if (len > mm->cached_hole_size) {
- start_addr = addr = mm->free_area_cache;
@@ -51348,7 +51885,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
}
full_search:
-@@ -1406,9 +1557,8 @@ full_search:
+@@ -1406,34 +1586,40 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -51360,7 +51897,29 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -1430,10 +1580,16 @@ full_search:
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- /*
+- * Remember the place where we stopped the search:
+- */
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+ #endif
void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
@@ -51378,7 +51937,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
mm->free_area_cache = addr;
mm->cached_hole_size = ~0UL;
}
-@@ -1451,7 +1607,7 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1451,7 +1637,7 @@ arch_get_unmapped_area_topdown(struct fi
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -51387,7 +51946,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -1460,6 +1616,10 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1460,13 +1646,18 @@ arch_get_unmapped_area_topdown(struct fi
if (flags & MAP_FIXED)
return addr;
@@ -51398,7 +51957,37 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
-@@ -1517,13 +1677,21 @@ bottomup:
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -1481,7 +1672,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -1498,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
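Throughout arch_get_unmapped_area() and its topdown variant, the bare "!vma || addr + len <= vma->vm_start" hole test is replaced by check_heap_stack_gap(), so a hole only counts as usable if it also clears the configured gap. A compact first-fit sketch of that loop shape; vmas is a hypothetical NULL-terminated, address-sorted array, and the real code additionally bounds the walk by TASK_SIZE:

#include <stdbool.h>

struct vma { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN 0x0100UL

/* same rule as gap_ok() in the previous sketch, inlined so this
 * snippet stands alone */
static bool hole_fits(const struct vma *next, unsigned long addr,
                      unsigned long len, unsigned long gap)
{
    if (!next)
        return true;
    if (addr + len > next->vm_start)
        return false;
    return !(next->vm_flags & VM_GROWSDOWN) ||
           gap <= next->vm_start - addr - len;
}

static unsigned long find_area(const struct vma *const *vmas,
                               unsigned long addr, unsigned long len,
                               unsigned long gap)
{
    for (const struct vma *const *v = vmas; ; v++) {
        const struct vma *next = *v;   /* NULL once past the last vma */
        if (hole_fits(next, addr, len, gap))
            return addr;               /* first hole that clears the gap */
        addr = next->vm_end;           /* skip past the blocking vma */
    }
}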
+@@ -1517,13 +1708,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -51422,7 +52011,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
mm->cached_hole_size = ~0UL;
return addr;
-@@ -1532,6 +1700,12 @@ bottomup:
+@@ -1532,6 +1731,12 @@ bottomup:
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
@@ -51435,7 +52024,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* Is this a new hole at the highest possible address?
*/
-@@ -1539,8 +1713,10 @@ void arch_unmap_area_topdown(struct mm_s
+@@ -1539,8 +1744,10 @@ void arch_unmap_area_topdown(struct mm_s
mm->free_area_cache = addr;
/* dont allow allocations above current base */
@@ -51447,7 +52036,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
}
unsigned long
-@@ -1648,6 +1824,34 @@ out:
+@@ -1648,6 +1855,34 @@ out:
return prev ? prev->vm_next : vma;
}
@@ -51482,7 +52071,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -1664,6 +1868,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1664,6 +1899,7 @@ static int acct_stack_growth(struct vm_a
return -ENOMEM;
/* Stack limit test */
@@ -51490,7 +52079,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
-@@ -1674,6 +1879,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1674,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
locked = mm->locked_vm + grow;
limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
limit >>= PAGE_SHIFT;
@@ -51498,7 +52087,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -1709,35 +1915,40 @@ static
+@@ -1709,35 +1946,42 @@ static
#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
@@ -51521,7 +52110,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
-+ if (locknext && unlikely(anon_vma_prepare(vma->vm_next)))
++ if (locknext && anon_vma_prepare(vma->vm_next))
+ return -ENOMEM;
anon_vma_lock(vma);
+ if (locknext)
@@ -51545,11 +52134,13 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/* Somebody else might have raced and expanded it already */
- if (address > vma->vm_end) {
-+ if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -1747,6 +1958,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1747,6 +1991,8 @@ int expand_upwards(struct vm_area_struct
if (!error)
vma->vm_end = address;
}
@@ -51558,25 +52149,25 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
anon_vma_unlock(vma);
return error;
}
-@@ -1758,7 +1971,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1758,7 +2004,8 @@ int expand_upwards(struct vm_area_struct
static int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
- int error;
+ int error, lockprev = 0;
-+ struct vm_area_struct *prev = NULL;
++ struct vm_area_struct *prev;
/*
* We must make sure the anon_vma is allocated
-@@ -1772,6 +1986,15 @@ static int expand_downwards(struct vm_ar
+@@ -1772,6 +2019,15 @@ static int expand_downwards(struct vm_ar
if (error)
return error;
++ prev = vma->vm_prev;
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-+ find_vma_prev(vma->vm_mm, address, &prev);
+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
+#endif
-+ if (lockprev && unlikely(anon_vma_prepare(prev)))
++ if (lockprev && anon_vma_prepare(prev))
+ return -ENOMEM;
+ if (lockprev)
+ anon_vma_lock(prev);
@@ -51584,12 +52175,14 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
anon_vma_lock(vma);
/*
-@@ -1781,9 +2004,15 @@ static int expand_downwards(struct vm_ar
+@@ -1781,9 +2037,17 @@ static int expand_downwards(struct vm_ar
*/
/* Somebody else might have raced and expanded it already */
- if (address < vma->vm_start) {
-+ if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
unsigned long size, grow;
+#ifdef CONFIG_PAX_SEGMEXEC
@@ -51601,7 +52194,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -1791,9 +2020,20 @@ static int expand_downwards(struct vm_ar
+@@ -1791,9 +2055,20 @@ static int expand_downwards(struct vm_ar
if (!error) {
vma->vm_start = address;
vma->vm_pgoff -= grow;
@@ -51622,7 +52215,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return error;
}
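expand_upwards() and expand_downwards() gain the mirror-image rule: a stack may not grow to within sysctl_heap_stack_gap bytes of an adjacent readable, writable, or executable mapping (the new -ENOMEM branches above), and the neighbour's anon_vma is locked when both sides can move. The growth-side predicate, modelled in isolation as a sketch:

#include <stdbool.h>

/* Mirrors the added branch in expand_downwards():
 *   prev && (prev->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)) &&
 *   address - prev->vm_end < sysctl_heap_stack_gap  ->  -ENOMEM */
static bool may_grow_down(unsigned long prev_end, bool prev_accessible,
                          unsigned long address, unsigned long gap)
{
    return !(prev_accessible && address - prev_end < gap);
}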
-@@ -1867,6 +2107,13 @@ static void remove_vma_list(struct mm_st
+@@ -1867,6 +2142,13 @@ static void remove_vma_list(struct mm_st
do {
long nrpages = vma_pages(vma);
@@ -51636,7 +52229,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
mm->total_vm -= nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma);
-@@ -1912,6 +2159,16 @@ detach_vmas_to_be_unmapped(struct mm_str
+@@ -1912,6 +2194,16 @@ detach_vmas_to_be_unmapped(struct mm_str
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -51653,7 +52246,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
rb_erase(&vma->vm_rb, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -1940,14 +2197,33 @@ static int __split_vma(struct mm_struct
+@@ -1940,14 +2232,33 @@ static int __split_vma(struct mm_struct
struct vm_area_struct *new;
int err = -ENOMEM;
@@ -51687,7 +52280,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -1960,6 +2236,22 @@ static int __split_vma(struct mm_struct
+@@ -1960,6 +2271,22 @@ static int __split_vma(struct mm_struct
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -51710,7 +52303,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
pol = mpol_dup(vma_policy(vma));
if (IS_ERR(pol)) {
err = PTR_ERR(pol);
-@@ -1985,6 +2277,42 @@ static int __split_vma(struct mm_struct
+@@ -1985,6 +2312,42 @@ static int __split_vma(struct mm_struct
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -51753,7 +52346,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/* Success. */
if (!err)
return 0;
-@@ -2000,6 +2328,15 @@ static int __split_vma(struct mm_struct
+@@ -2000,6 +2363,15 @@ static int __split_vma(struct mm_struct
out_free_mpol:
mpol_put(pol);
out_free_vma:
@@ -51769,7 +52362,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
kmem_cache_free(vm_area_cachep, new);
out_err:
return err;
-@@ -2012,6 +2349,15 @@ static int __split_vma(struct mm_struct
+@@ -2012,6 +2384,15 @@ static int __split_vma(struct mm_struct
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, int new_below)
{
@@ -51785,7 +52378,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -2023,11 +2369,30 @@ int split_vma(struct mm_struct *mm, stru
+@@ -2023,11 +2404,30 @@ int split_vma(struct mm_struct *mm, stru
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -51816,7 +52409,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -2101,6 +2466,8 @@ int do_munmap(struct mm_struct *mm, unsi
+@@ -2101,6 +2501,8 @@ int do_munmap(struct mm_struct *mm, unsi
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -51825,7 +52418,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return 0;
}
-@@ -2113,22 +2480,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+@@ -2113,22 +2515,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
profile_munmap(addr);
@@ -51854,7 +52447,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -2142,6 +2505,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -2142,6 +2540,7 @@ unsigned long do_brk(unsigned long addr,
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -51862,7 +52455,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
len = PAGE_ALIGN(len);
if (!len)
-@@ -2153,16 +2517,30 @@ unsigned long do_brk(unsigned long addr,
+@@ -2153,16 +2552,30 @@ unsigned long do_brk(unsigned long addr,
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -51894,7 +52487,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
locked += mm->locked_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
-@@ -2179,22 +2557,22 @@ unsigned long do_brk(unsigned long addr,
+@@ -2179,22 +2592,22 @@ unsigned long do_brk(unsigned long addr,
/*
* Clear old maps. this also does some error checking for us
*/
@@ -51921,7 +52514,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2208,7 +2586,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -2208,7 +2621,7 @@ unsigned long do_brk(unsigned long addr,
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -51930,7 +52523,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return -ENOMEM;
}
-@@ -2221,11 +2599,12 @@ unsigned long do_brk(unsigned long addr,
+@@ -2221,11 +2634,12 @@ unsigned long do_brk(unsigned long addr,
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
@@ -51945,7 +52538,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return addr;
}
-@@ -2272,8 +2651,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2272,8 +2686,10 @@ void exit_mmap(struct mm_struct *mm)
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
*/
@@ -51957,7 +52550,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
-@@ -2287,6 +2668,10 @@ int insert_vm_struct(struct mm_struct *
+@@ -2287,6 +2703,10 @@ int insert_vm_struct(struct mm_struct *
struct vm_area_struct * __vma, * prev;
struct rb_node ** rb_link, * rb_parent;
@@ -51968,7 +52561,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2309,7 +2694,22 @@ int insert_vm_struct(struct mm_struct *
+@@ -2309,7 +2729,22 @@ int insert_vm_struct(struct mm_struct *
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -51991,7 +52584,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
return 0;
}
-@@ -2327,6 +2727,8 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2327,6 +2762,8 @@ struct vm_area_struct *copy_vma(struct v
struct rb_node **rb_link, *rb_parent;
struct mempolicy *pol;
@@ -52000,7 +52593,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2376,6 +2778,39 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2376,6 +2813,39 @@ struct vm_area_struct *copy_vma(struct v
kmem_cache_free(vm_area_cachep, new_vma);
return NULL;
}
@@ -52040,7 +52633,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
/*
* Return true if the calling process may expand its vm space by the passed
-@@ -2387,7 +2822,7 @@ int may_expand_vm(struct mm_struct *mm,
+@@ -2387,7 +2857,7 @@ int may_expand_vm(struct mm_struct *mm,
unsigned long lim;
lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -52049,7 +52642,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
if (cur + npages > lim)
return 0;
return 1;
-@@ -2457,6 +2892,17 @@ int install_special_mapping(struct mm_st
+@@ -2457,6 +2927,17 @@ int install_special_mapping(struct mm_st
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -52069,7 +52662,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
--- linux-2.6.34.7/mm/mprotect.c 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/mm/mprotect.c 2010-08-13 18:38:12.000000000 -0400
++++ linux-2.6.34.7/mm/mprotect.c 2010-09-17 18:52:03.000000000 -0400
@@ -23,10 +23,16 @@
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
@@ -52136,7 +52729,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned long newflags)
-@@ -143,6 +191,14 @@ mprotect_fixup(struct vm_area_struct *vm
+@@ -143,11 +191,29 @@ mprotect_fixup(struct vm_area_struct *vm
int error;
int dirty_accountable = 0;
@@ -52151,7 +52744,22 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
if (newflags == oldflags) {
*pprev = vma;
return 0;
-@@ -164,6 +220,42 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+
++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++ return -ENOMEM;
++
++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++ return -ENOMEM;
++ }
++
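The added block makes mprotect() refuse, with -ENOMEM, to turn a range accessible when doing so would erode the gap next to a growable neighbour. A hedged sketch of what that looks like from userspace; the caller is assumed to have reserved page_below_stack as PROT_NONE directly under a stack vma:

/* Hypothetical probe: with this hunk applied, making a PROT_NONE page
 * readable inside the gap below a VM_GROWSDOWN vma should fail. */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int probe(void *page_below_stack)
{
    if (mprotect(page_below_stack, 4096, PROT_READ | PROT_WRITE) == -1) {
        /* ENOMEM here is the new gap enforcement, not an OOM condition */
        perror("mprotect");
        return errno == ENOMEM;
    }
    return 0;
}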
+ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+@@ -164,6 +230,42 @@ mprotect_fixup(struct vm_area_struct *vm
}
}
@@ -52194,7 +52802,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
/*
* First try to merge with previous and/or next vma.
*/
-@@ -194,9 +286,21 @@ success:
+@@ -194,9 +296,21 @@ success:
* vm_flags and vm_page_prot are protected by the mmap_sem
* held in write mode.
*/
@@ -52217,7 +52825,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
if (vma_wants_writenotify(vma)) {
vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-@@ -237,6 +341,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -237,6 +351,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
end = start + len;
if (end <= start)
return -ENOMEM;
@@ -52235,7 +52843,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
if (!arch_validate_prot(prot))
return -EINVAL;
-@@ -244,7 +359,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -244,7 +369,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
/*
* Does the application expect PROT_READ to imply PROT_EXEC:
*/
@@ -52244,7 +52852,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
-@@ -276,6 +391,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -276,6 +401,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
if (start > vma->vm_start)
prev = vma;
@@ -52261,7 +52869,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
for (nstart = start ; ; ) {
unsigned long newflags;
-@@ -300,6 +425,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -300,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
if (error)
goto out;
perf_event_mmap(vma);
@@ -52376,8 +52984,16 @@ diff -urNp linux-2.6.34.7/mm/mremap.c linux-2.6.34.7/mm/mremap.c
if (ret & ~PAGE_MASK)
diff -urNp linux-2.6.34.7/mm/nommu.c linux-2.6.34.7/mm/nommu.c
--- linux-2.6.34.7/mm/nommu.c 2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/nommu.c 2010-08-29 21:17:12.000000000 -0400
-@@ -762,15 +762,6 @@ struct vm_area_struct *find_vma(struct m
++++ linux-2.6.34.7/mm/nommu.c 2010-09-17 18:52:03.000000000 -0400
+@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+
+ atomic_long_t mmap_pages_allocated;
+
+@@ -762,15 +761,6 @@ struct vm_area_struct *find_vma(struct m
EXPORT_SYMBOL(find_vma);
/*
@@ -52393,7 +53009,7 @@ diff -urNp linux-2.6.34.7/mm/nommu.c linux-2.6.34.7/mm/nommu.c
* expand a stack to a given address
* - not supported under NOMMU conditions
*/
-@@ -1487,6 +1478,7 @@ int split_vma(struct mm_struct *mm, stru
+@@ -1487,6 +1477,7 @@ int split_vma(struct mm_struct *mm, stru
/* most fields are the same, copy all, and then fixup */
*new = *vma;
@@ -55100,7 +55716,7 @@ diff -urNp linux-2.6.34.7/security/integrity/ima/ima_queue.c linux-2.6.34.7/secu
return 0;
diff -urNp linux-2.6.34.7/security/Kconfig linux-2.6.34.7/security/Kconfig
--- linux-2.6.34.7/security/Kconfig 2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/security/Kconfig 2010-09-15 02:12:09.000000000 -0400
++++ linux-2.6.34.7/security/Kconfig 2010-09-17 17:39:50.000000000 -0400
@@ -4,6 +4,505 @@
menu "Security options"
@@ -55124,7 +55740,7 @@ diff -urNp linux-2.6.34.7/security/Kconfig linux-2.6.34.7/security/Kconfig
+
+config PAX
+ bool "Enable various PaX features"
-+ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
+ help
+ This allows you to enable various PaX features. PaX adds
+ intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.34/4425_grsec-pax-without-grsec.patch b/2.6.34/4425_grsec-pax-without-grsec.patch
index 9aec296..2fc4199 100644
--- a/2.6.34/4425_grsec-pax-without-grsec.patch
+++ b/2.6.34/4425_grsec-pax-without-grsec.patch
@@ -54,7 +54,7 @@ The original version of this patch contained no credits/description.
current->comm, task_pid_nr(current), current_uid(), current_euid());
print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
@@ -1846,10 +1850,12 @@
- #ifdef CONFIG_PAX_USERCOPY
+
void pax_report_leak_to_user(const void *ptr, unsigned long len)
{
+#ifdef CONFIG_GRKERNSEC
@@ -81,12 +81,12 @@ The original version of this patch contained no credits/description.
do_group_exit(SIGKILL);
--- a/security/Kconfig
+++ b/security/Kconfig
-@@ -23,7 +23,7 @@ menu "PaX"
-
+@@ -23,7 +23,7 @@
+
config PAX
bool "Enable various PaX features"
-- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
-+ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
+- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
++ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
help
This allows you to enable various PaX features. PaX adds
intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.34/4430_grsec-kconfig-default-gids.patch b/2.6.34/4430_grsec-kconfig-default-gids.patch
index b7a0413..7ba8aa2 100644
--- a/2.6.34/4430_grsec-kconfig-default-gids.patch
+++ b/2.6.34/4430_grsec-kconfig-default-gids.patch
@@ -29,25 +29,25 @@ from shooting themselves in the foot.
config GRKERNSEC_EXECLOG
bool "Exec logging"
-@@ -780,7 +780,7 @@
+@@ -785,7 +785,7 @@
config GRKERNSEC_TPE_GID
int "GID for untrusted users"
depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
- default 1005
+ default 100
help
- If you have selected the "Invert GID option" above, setting this
- GID determines what group TPE restrictions will be *disabled* for.
-@@ -792,7 +792,7 @@
+ Setting this GID determines what group TPE restrictions will be
+ *enabled* for. If the sysctl option is enabled, a sysctl option
+@@ -794,7 +794,7 @@
config GRKERNSEC_TPE_GID
int "GID for trusted users"
depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
- default 1005
+ default 10
help
- If you have selected the "Invert GID option" above, setting this
- GID determines what group TPE restrictions will be *disabled* for.
-@@ -866,7 +866,7 @@
+ Setting this GID determines what group TPE restrictions will be
+ *disabled* for. If the sysctl option is enabled, a sysctl option
+@@ -865,7 +865,7 @@
config GRKERNSEC_SOCKET_ALL_GID
int "GID to deny all sockets for"
depends on GRKERNSEC_SOCKET_ALL
@@ -56,7 +56,7 @@ from shooting themselves in the foot.
help
Here you can choose the GID to disable socket access for. Remember to
add the users you want socket access disabled for to the GID
-@@ -887,7 +887,7 @@
+@@ -886,7 +886,7 @@
config GRKERNSEC_SOCKET_CLIENT_GID
int "GID to deny client sockets for"
depends on GRKERNSEC_SOCKET_CLIENT
@@ -65,7 +65,7 @@ from shooting themselves in the foot.
help
Here you can choose the GID to disable client socket access for.
Remember to add the users you want client socket access disabled for to
-@@ -905,7 +905,7 @@
+@@ -904,7 +904,7 @@
config GRKERNSEC_SOCKET_SERVER_GID
int "GID to deny server sockets for"
depends on GRKERNSEC_SOCKET_SERVER
diff --git a/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch b/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch
index dfedf22..64d6cf3 100644
--- a/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch
+++ b/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch
@@ -21,7 +21,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
--- a/grsecurity/Kconfig
+++ b/grsecurity/Kconfig
-@@ -1372,6 +1372,27 @@
+@@ -1371,6 +1371,27 @@
menu "Logging Options"
depends on GRKERNSEC
diff --git a/2.6.35/0000_README b/2.6.35/0000_README
new file mode 100644
index 0000000..53076a6
--- /dev/null
+++ b/2.6.35/0000_README
@@ -0,0 +1,50 @@
+README
+-----------------------------------------------------------------------------
+
+Individual Patch Descriptions:
+-----------------------------------------------------------------------------
+Patch: 4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch
+From: http://www.grsecurity.net
+Desc: hardened-sources base patch from upstream grsecurity
+
+Patch: 4421_grsec-remove-localversion-grsec.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Removes grsecurity's localversion-grsec file
+
+Patch: 4422_grsec-mute-warnings.patch
+From: Alexander Gabert <gaberta@fh-trier.de>
+ Gordon Malm <gengor@gentoo.org>
+Desc: Removes verbose compile warning settings from grsecurity, restores
+ mainline Linux kernel behavior
+
+Patch: 4423_grsec-remove-protected-paths.patch
+From: Anthony G. Basile, Ph. D. <basile@opensource.dyc.edu>
+Desc: Removes chmod statements from grsecurity/Makefile
+
+Patch: 4425_grsec-pax-without-grsec.patch
+From: Gordon Malm <gengor@gentoo.org>
+Desc: Allows PaX features to be selected without enabling GRKERNSEC
+
+Patch: 4430_grsec-kconfig-default-gids.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Sets sane(r) default GIDs on various grsecurity group-dependent
+ features
+
+Patch: 4435_grsec-kconfig-gentoo.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Kerin Millar <kerframil@gmail.com>
+Desc: Adds Hardened Gentoo [server/workstation] security levels, sets
+ Hardened Gentoo [workstation] as default
+
+Patch: 4440_selinux-avc_audit-log-curr_ip.patch
+From: Gordon Malm <gengor@gentoo.org>
+Desc: Configurable option to add src IP address to SELinux log messages
+
+Patch: 4445_disable-compat_vdso.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Kerin Millar <kerframil@gmail.com>
+Desc: Disables VDSO_COMPAT operation completely
+
+Patch: 4450_check_ssp_fix.patch
+From: Magnus Granberg <zorry@gentoo.org>
+Desc: Fixes the kernel check script for SSP (stack-smashing protection)
diff --git a/2.6.35/4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch b/2.6.35/4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch
new file mode 100644
index 0000000..ff2fb9b
--- /dev/null
+++ b/2.6.35/4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch
@@ -0,0 +1,56800 @@
+diff -urNp linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -3,9 +3,9 @@
+
+ #include <linux/dma-attrs.h>
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ return dma_ops;
+ }
+diff -urNp linux-2.6.35.4/arch/alpha/include/asm/elf.h linux-2.6.35.4/arch/alpha/include/asm/elf.h
+--- linux-2.6.35.4/arch/alpha/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -urNp linux-2.6.35.4/arch/alpha/include/asm/pgtable.h linux-2.6.35.4/arch/alpha/include/asm/pgtable.h
+--- linux-2.6.35.4/arch/alpha/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
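
The _NOEXEC pgprot variants add _PAGE_FOE (fault-on-execute), so that under PAGEEXEC a mapping created without PROT_EXEC actually faults on instruction fetch instead of silently inheriting execute permission; with PAGEEXEC disabled they alias the permissive classics. A simplified sketch of the selection logic (illustrative only; in the kernel the choice happens via the protection map consulted by vm_get_page_prot()):

#include <stdio.h>

#define PROT_READ  0x1
#define PROT_WRITE 0x2
#define PROT_EXEC  0x4

enum pgprot { PAGE_SHARED, PAGE_SHARED_NOEXEC };

static enum pgprot pick_prot(int prot, int pageexec)
{
	/* Without PROT_EXEC, PAGEEXEC selects the fault-on-execute
	 * variant so do_page_fault() can catch and kill offenders. */
	if (pageexec && !(prot & PROT_EXEC))
		return PAGE_SHARED_NOEXEC;
	return PAGE_SHARED;
}

int main(void)
{
	printf("%d\n", pick_prot(PROT_READ | PROT_WRITE, 1));
	return 0;
}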
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/module.c linux-2.6.35.4/arch/alpha/kernel/module.c
+--- linux-2.6.35.4/arch/alpha/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
+@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/osf_sys.c linux-2.6.35.4/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.35.4/arch/alpha/kernel/osf_sys.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/osf_sys.c 2010-09-17 20:12:09.000000000 -0400
+@@ -1170,7 +1170,7 @@ arch_get_unmapped_area_1(unsigned long a
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+@@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1213,8 +1217,8 @@ arch_get_unmapped_area(struct file *filp
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
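
Both osf_sys.c hunks follow the pattern repeated across every architecture in this patch: the open-coded "!vma || addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), and fixed search origins become the (randomizable) mm->mmap_base. The helper itself is defined elsewhere in the patch; a hedged approximation of its intent, with names and the gap size simplified:

#include <stdbool.h>

struct vma_stub {
	unsigned long vm_start;
	unsigned long vm_flags;
};

#define VM_GROWSDOWN	0x100UL
/* assumption: a fixed guard gap; the real knob may be tunable */
static const unsigned long heap_stack_gap = 64UL << 13;

static bool check_heap_stack_gap(const struct vma_stub *vma,
				 unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;			/* nothing above us */
	if (addr + len > vma->vm_start)
		return false;			/* would overlap */
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep room below a stack */
		return vma->vm_start - (addr + len) >= heap_stack_gap;
	return true;
}

int main(void)
{
	struct vma_stub stack = { .vm_start = 0x7f0000000000UL,
				  .vm_flags = VM_GROWSDOWN };
	return !check_heap_stack_gap(&stack, 0x600000000000UL, 4096);
}

The VM_GROWSDOWN clause is the point of the exercise: a candidate mapping that fits numerically but sits flush against a downward-growing stack is rejected, closing the classic heap/stack clash.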
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c
+--- linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct dev
+ return 0;
+ }
+
+-struct dma_map_ops alpha_pci_ops = {
++const struct dma_map_ops alpha_pci_ops = {
+ .alloc_coherent = alpha_pci_alloc_coherent,
+ .free_coherent = alpha_pci_free_coherent,
+ .map_page = alpha_pci_map_page,
+@@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = {
+ .set_dma_mask = alpha_pci_set_mask,
+ };
+
+-struct dma_map_ops *dma_ops = &alpha_pci_ops;
++const struct dma_map_ops *dma_ops = &alpha_pci_ops;
+ EXPORT_SYMBOL(dma_ops);
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/pci-noop.c linux-2.6.35.4/arch/alpha/kernel/pci-noop.c
+--- linux-2.6.35.4/arch/alpha/kernel/pci-noop.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/pci-noop.c 2010-09-17 20:12:09.000000000 -0400
+@@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct de
+ return 0;
+ }
+
+-struct dma_map_ops alpha_noop_ops = {
++const struct dma_map_ops alpha_noop_ops = {
+ .alloc_coherent = alpha_noop_alloc_coherent,
+ .free_coherent = alpha_noop_free_coherent,
+ .map_page = alpha_noop_map_page,
+@@ -183,7 +183,7 @@ struct dma_map_ops alpha_noop_ops = {
+ .set_dma_mask = alpha_noop_set_mask,
+ };
+
+-struct dma_map_ops *dma_ops = &alpha_noop_ops;
++const struct dma_map_ops *dma_ops = &alpha_noop_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+diff -urNp linux-2.6.35.4/arch/alpha/mm/fault.c linux-2.6.35.4/arch/alpha/mm/fault.c
+--- linux-2.6.35.4/arch/alpha/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
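
The magic in the PLT emulation above, e.g. "(addrl ^ 0x8000UL) + 0x8000UL" applied after OR-ing the displacement with 0xFFFFFFFFFFFF0000UL, is a branch-free 16-bit sign extension (with a 32-bit sibling for the ldah half). A small self-checking demo of the idiom:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sext16(uint16_t disp)
{
	/* mirror the patch: pre-set the high bits, then xor/add the
	 * sign bit so the carry either clears them or keeps them */
	uint64_t x = (uint64_t)disp | 0xFFFFFFFFFFFF0000ULL;
	return (x ^ 0x8000ULL) + 0x8000ULL;
}

int main(void)
{
	assert(sext16(0x0010) == 0x0000000000000010ULL); /* positive */
	assert(sext16(0x8000) == 0xFFFFFFFFFFFF8000ULL); /* negative */
	puts("sign extension ok");
	return 0;
}

For a positive displacement the add carries out past bit 63 and clears the pre-set high bits; for a negative one it stops at bit 15 and leaves them in place, which is exactly two's-complement sign extension.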
+diff -urNp linux-2.6.35.4/arch/arm/include/asm/elf.h linux-2.6.35.4/arch/arm/include/asm/elf.h
+--- linux-2.6.35.4/arch/arm/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -111,7 +111,14 @@ int dump_task_regs(struct task_struct *t
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff -urNp linux-2.6.35.4/arch/arm/include/asm/kmap_types.h linux-2.6.35.4/arch/arm/include/asm/kmap_types.h
+--- linux-2.6.35.4/arch/arm/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -21,6 +21,7 @@ enum km_type {
+ KM_L1_CACHE,
+ KM_L2_CACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.35.4/arch/arm/include/asm/uaccess.h linux-2.6.35.4/arch/arm/include/asm/uaccess.h
+--- linux-2.6.35.4/arch/arm/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
+@@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -412,6 +415,9 @@ static inline unsigned long __must_check
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
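
The added "(long)n < 0" check rejects a byte count with the top bit set before access_ok() and the copy routine ever see it, neutralizing the common bug class where a signed length computation underflows and is then interpreted as an enormous unsigned size. A user-space analogue of the guard:

#include <stdio.h>
#include <string.h>

static unsigned long copy_guarded(void *to, const void *from,
				  unsigned long n)
{
	if ((long)n < 0)
		return n;	/* refuse; report everything uncopied */
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[8] = "abcdefg", dst[8];
	long len = 4 - 8;	/* an underflowed length computation */
	unsigned long left = copy_guarded(dst, src, (unsigned long)len);
	printf("uncopied: %lu\n", left);
	return 0;
}

The same pattern is applied to m32r's __generic_copy_{to,from}_user later in this patch.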
+diff -urNp linux-2.6.35.4/arch/arm/kernel/kgdb.c linux-2.6.35.4/arch/arm/kernel/kgdb.c
+--- linux-2.6.35.4/arch/arm/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
+@@ -208,7 +208,7 @@ void kgdb_arch_exit(void)
+ * and we handle the normal undef case within the do_undefinstr
+ * handler.
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ #ifndef __ARMEB__
+ .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
+ #else /* ! __ARMEB__ */
+diff -urNp linux-2.6.35.4/arch/arm/mach-at91/pm.c linux-2.6.35.4/arch/arm/mach-at91/pm.c
+--- linux-2.6.35.4/arch/arm/mach-at91/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-at91/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -294,7 +294,7 @@ static void at91_pm_end(void)
+ }
+
+
+-static struct platform_suspend_ops at91_pm_ops ={
++static const struct platform_suspend_ops at91_pm_ops ={
+ .valid = at91_pm_valid_state,
+ .begin = at91_pm_begin,
+ .enter = at91_pm_enter,
+diff -urNp linux-2.6.35.4/arch/arm/mach-davinci/pm.c linux-2.6.35.4/arch/arm/mach-davinci/pm.c
+--- linux-2.6.35.4/arch/arm/mach-davinci/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-davinci/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_stat
+ return ret;
+ }
+
+-static struct platform_suspend_ops davinci_pm_ops = {
++static const struct platform_suspend_ops davinci_pm_ops = {
+ .enter = davinci_pm_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff -urNp linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c
+--- linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c 2010-09-17 20:12:09.000000000 -0400
+@@ -47,6 +47,7 @@ static ssize_t last_radio_log_read(struc
+ return count;
+ }
+
++/* cannot be const, see msm_init_last_radio_log */
+ static struct file_operations last_radio_log_fops = {
+ .read = last_radio_log_read
+ };
+diff -urNp linux-2.6.35.4/arch/arm/mach-omap1/pm.c linux-2.6.35.4/arch/arm/mach-omap1/pm.c
+--- linux-2.6.35.4/arch/arm/mach-omap1/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-omap1/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
+
+
+
+-static struct platform_suspend_ops omap_pm_ops ={
++static const struct platform_suspend_ops omap_pm_ops ={
+ .prepare = omap_pm_prepare,
+ .enter = omap_pm_enter,
+ .finish = omap_pm_finish,
+diff -urNp linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c
+--- linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c 2010-09-17 20:12:09.000000000 -0400
+@@ -325,7 +325,7 @@ static void omap2_pm_finish(void)
+ enable_hlt();
+ }
+
+-static struct platform_suspend_ops omap_pm_ops = {
++static const struct platform_suspend_ops omap_pm_ops = {
+ .prepare = omap2_pm_prepare,
+ .enter = omap2_pm_enter,
+ .finish = omap2_pm_finish,
+diff -urNp linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c
+--- linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c 2010-09-17 20:12:09.000000000 -0400
+@@ -669,7 +669,7 @@ static void omap3_pm_end(void)
+ return;
+ }
+
+-static struct platform_suspend_ops omap_pm_ops = {
++static const struct platform_suspend_ops omap_pm_ops = {
+ .begin = omap3_pm_begin,
+ .end = omap3_pm_end,
+ .prepare = omap3_pm_prepare,
+diff -urNp linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c
+--- linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_stat
+ (state == PM_SUSPEND_MEM);
+ }
+
+-static struct platform_suspend_ops pnx4008_pm_ops = {
++static const struct platform_suspend_ops pnx4008_pm_ops = {
+ .enter = pnx4008_pm_enter,
+ .valid = pnx4008_pm_valid,
+ };
+diff -urNp linux-2.6.35.4/arch/arm/mach-pxa/pm.c linux-2.6.35.4/arch/arm/mach-pxa/pm.c
+--- linux-2.6.35.4/arch/arm/mach-pxa/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-pxa/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -96,7 +96,7 @@ void pxa_pm_finish(void)
+ pxa_cpu_pm_fns->finish();
+ }
+
+-static struct platform_suspend_ops pxa_pm_ops = {
++static const struct platform_suspend_ops pxa_pm_ops = {
+ .valid = pxa_pm_valid,
+ .enter = pxa_pm_enter,
+ .prepare = pxa_pm_prepare,
+diff -urNp linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c
+--- linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
+ }
+
+ #ifdef CONFIG_PM
+-static struct platform_suspend_ops sharpsl_pm_ops = {
++static const struct platform_suspend_ops sharpsl_pm_ops = {
+ .prepare = pxa_pm_prepare,
+ .finish = pxa_pm_finish,
+ .enter = corgi_pxa_pm_enter,
+diff -urNp linux-2.6.35.4/arch/arm/mach-sa1100/pm.c linux-2.6.35.4/arch/arm/mach-sa1100/pm.c
+--- linux-2.6.35.4/arch/arm/mach-sa1100/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-sa1100/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
+ return virt_to_phys(sp);
+ }
+
+-static struct platform_suspend_ops sa11x0_pm_ops = {
++static const struct platform_suspend_ops sa11x0_pm_ops = {
+ .enter = sa11x0_pm_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff -urNp linux-2.6.35.4/arch/arm/mm/fault.c linux-2.6.35.4/arch/arm/mm/fault.c
+--- linux-2.6.35.4/arch/arm/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
+@@ -167,6 +167,13 @@ __do_user_fault(struct task_struct *tsk,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (fsr & FSR_LNX_PF) {
++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+@@ -364,6 +371,33 @@ do_page_fault(unsigned long addr, unsign
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
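
__do_user_fault() now terminates a PAGEEXEC offender with SIGKILL after pax_report_fault() has logged the faulting context, and pax_report_insns() dumps a bounded window of bytes around PC and SP, printing placeholders wherever get_user() itself faults. A user-space analogue of that bounded dump (direct reads stand in for get_user(), so the fault fallback is omitted):

#include <stdio.h>

static void report_insns(const unsigned char *pc)
{
	printf("PAX: bytes at PC: ");
	for (int i = 0; i < 20; i++)
		printf("%02x ", pc[i]);
	printf("\n");
}

int main(void)
{
	static const unsigned char code[20] = { 0x90, 0xcc };
	report_insns(code);
	return 0;
}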
+diff -urNp linux-2.6.35.4/arch/arm/mm/mmap.c linux-2.6.35.4/arch/arm/mm/mmap.c
+--- linux-2.6.35.4/arch/arm/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400
+@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -94,14 +97,14 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
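
Two things change in the ARM allocator: under MF_PAX_RANDMMAP the caller's hint address is skipped entirely, so a fixed mmap() hint cannot be used to sidestep randomization, and the linear search (re)starts from the randomized mm->mmap_base rather than the constant TASK_UNMAPPED_BASE. A compressed sketch of that first-fit walk over a toy, sorted vma list (alignment and the hole-size cache are omitted):

#include <stdio.h>

struct vma { unsigned long start, end; const struct vma *next; };

static unsigned long first_fit(const struct vma *v, unsigned long base,
			       unsigned long limit, unsigned long len)
{
	unsigned long addr = base;

	for (; v; v = v->next) {
		if (addr + len > limit)
			return 0;		/* -ENOMEM in the kernel */
		if (addr + len <= v->start)	/* check_heap_stack_gap() */
			return addr;
		addr = v->end;			/* skip past the mapping */
	}
	return addr + len <= limit ? addr : 0;
}

int main(void)
{
	struct vma b = { 0x60000, 0x70000, NULL };
	struct vma a = { 0x40000, 0x50000, &b };
	printf("%#lx\n", first_fit(&a, 0x40000, 0x100000, 0x8000));
	return 0;
}

Expected result here is 0x50000: the gap between the two mappings is the first hole large enough for the request.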
+diff -urNp linux-2.6.35.4/arch/arm/plat-samsung/pm.c linux-2.6.35.4/arch/arm/plat-samsung/pm.c
+--- linux-2.6.35.4/arch/arm/plat-samsung/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/plat-samsung/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
+ s3c_pm_check_cleanup();
+ }
+
+-static struct platform_suspend_ops s3c_pm_ops = {
++static const struct platform_suspend_ops s3c_pm_ops = {
+ .enter = s3c_pm_enter,
+ .prepare = s3c_pm_prepare,
+ .finish = s3c_pm_finish,
+diff -urNp linux-2.6.35.4/arch/avr32/include/asm/elf.h linux-2.6.35.4/arch/avr32/include/asm/elf.h
+--- linux-2.6.35.4/arch/avr32/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/avr32/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff -urNp linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h
+--- linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff -urNp linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c
+--- linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -176,7 +176,7 @@ out:
+ return 0;
+ }
+
+-static struct platform_suspend_ops avr32_pm_ops = {
++static const struct platform_suspend_ops avr32_pm_ops = {
+ .valid = avr32_pm_valid_state,
+ .enter = avr32_pm_enter,
+ };
+diff -urNp linux-2.6.35.4/arch/avr32/mm/fault.c linux-2.6.35.4/arch/avr32/mm/fault.c
+--- linux-2.6.35.4/arch/avr32/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/avr32/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -157,6 +174,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff -urNp linux-2.6.35.4/arch/blackfin/kernel/kgdb.c linux-2.6.35.4/arch/blackfin/kernel/kgdb.c
+--- linux-2.6.35.4/arch/blackfin/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/blackfin/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
+@@ -397,7 +397,7 @@ int kgdb_arch_handle_exception(int vecto
+ return -1; /* this means that we do not want to exit from the handler */
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0xa1},
+ #ifdef CONFIG_SMP
+ .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
+diff -urNp linux-2.6.35.4/arch/blackfin/mach-common/pm.c linux-2.6.35.4/arch/blackfin/mach-common/pm.c
+--- linux-2.6.35.4/arch/blackfin/mach-common/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/blackfin/mach-common/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -232,7 +232,7 @@ static int bfin_pm_enter(suspend_state_t
+ return 0;
+ }
+
+-struct platform_suspend_ops bfin_pm_ops = {
++const struct platform_suspend_ops bfin_pm_ops = {
+ .enter = bfin_pm_enter,
+ .valid = bfin_pm_valid,
+ };
+diff -urNp linux-2.6.35.4/arch/blackfin/mm/maccess.c linux-2.6.35.4/arch/blackfin/mm/maccess.c
+--- linux-2.6.35.4/arch/blackfin/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/blackfin/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400
+@@ -16,7 +16,7 @@ static int validate_memory_access_addres
+ return bfin_mem_access_type(addr, size);
+ }
+
+-long probe_kernel_read(void *dst, void *src, size_t size)
++long probe_kernel_read(void *dst, const void *src, size_t size)
+ {
+ unsigned long lsrc = (unsigned long)src;
+ int mem_type;
+@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *
+ return -EFAULT;
+ }
+
+-long probe_kernel_write(void *dst, void *src, size_t size)
++long probe_kernel_write(void *dst, const void *src, size_t size)
+ {
+ unsigned long ldst = (unsigned long)dst;
+ int mem_type;
+diff -urNp linux-2.6.35.4/arch/frv/include/asm/kmap_types.h linux-2.6.35.4/arch/frv/include/asm/kmap_types.h
+--- linux-2.6.35.4/arch/frv/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/frv/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.35.4/arch/frv/mm/elf-fdpic.c linux-2.6.35.4/arch/frv/mm/elf-fdpic.c
+--- linux-2.6.35.4/arch/frv/mm/elf-fdpic.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/frv/mm/elf-fdpic.c 2010-09-17 20:12:09.000000000 -0400
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ goto success;
+ }
+
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+diff -urNp linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c
+--- linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -17,7 +17,7 @@
+ #include <linux/swiotlb.h>
+ #include <asm/machvec.h>
+
+-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
++extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
+ /* swiotlb declarations & definitions: */
+ extern int swiotlb_late_init_with_default_size (size_t size);
+@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
+ !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
+ }
+
+-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
++const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+ {
+ if (use_swiotlb(dev))
+ return &swiotlb_dma_ops;
+diff -urNp linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c
+--- linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
+ },
+ };
+
+-extern struct dma_map_ops swiotlb_dma_ops;
++extern const struct dma_map_ops swiotlb_dma_ops;
+
+ static int __init
+ sba_init(void)
+@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
+
+ __setup("sbapagesize=",sba_page_override);
+
+-struct dma_map_ops sba_dma_ops = {
++const struct dma_map_ops sba_dma_ops = {
+ .alloc_coherent = sba_alloc_coherent,
+ .free_coherent = sba_free_coherent,
+ .map_page = sba_map_page,
+diff -urNp linux-2.6.35.4/arch/ia64/include/asm/compat.h linux-2.6.35.4/arch/ia64/include/asm/compat.h
+--- linux-2.6.35.4/arch/ia64/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
+ }
+
+ static __inline__ void __user *
+-compat_alloc_user_space (long len)
++arch_compat_alloc_user_space (long len)
+ {
+ struct pt_regs *regs = task_pt_regs(current);
+ return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
+diff -urNp linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -12,7 +12,7 @@
+
+ #define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+ extern struct ia64_machine_vector ia64_mv;
+ extern void set_iommu_machvec(void);
+
+@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *daddr, gfp_t gfp)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ void *caddr;
+
+ caddr = ops->alloc_coherent(dev, size, daddr, gfp);
+@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *caddr, dma_addr_t daddr)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ debug_dma_free_coherent(dev, size, caddr, daddr);
+ ops->free_coherent(dev, size, caddr, daddr);
+ }
+@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->mapping_error(dev, daddr);
+ }
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->dma_supported(dev, mask);
+ }
+
+diff -urNp linux-2.6.35.4/arch/ia64/include/asm/elf.h linux-2.6.35.4/arch/ia64/include/asm/elf.h
+--- linux-2.6.35.4/arch/ia64/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff -urNp linux-2.6.35.4/arch/ia64/include/asm/machvec.h linux-2.6.35.4/arch/ia64/include/asm/machvec.h
+--- linux-2.6.35.4/arch/ia64/include/asm/machvec.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/include/asm/machvec.h 2010-09-17 20:12:09.000000000 -0400
+@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
+ /* DMA-mapping interface: */
+ typedef void ia64_mv_dma_init (void);
+ typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
++typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+
+ /*
+ * WARNING: The legacy I/O space is _architected_. Platforms are
+@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
+ # endif /* CONFIG_IA64_GENERIC */
+
+ extern void swiotlb_dma_init(void);
+-extern struct dma_map_ops *dma_get_ops(struct device *);
++extern const struct dma_map_ops *dma_get_ops(struct device *);
+
+ /*
+ * Define default versions so we can extend machvec for new platforms without having
+diff -urNp linux-2.6.35.4/arch/ia64/include/asm/pgtable.h linux-2.6.35.4/arch/ia64/include/asm/pgtable.h
+--- linux-2.6.35.4/arch/ia64/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -143,6 +143,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -urNp linux-2.6.35.4/arch/ia64/include/asm/uaccess.h linux-2.6.35.4/arch/ia64/include/asm/uaccess.h
+--- linux-2.6.35.4/arch/ia64/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
+@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
+ const void *__cu_from = (from); \
+ long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ __cu_len; \
+ })
+@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
+ long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+ })
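
The ia64 variant bounds the length to the half-open interval (0, INT_MAX] before __access_ok() runs, which rejects both zero/negative lengths and counts large enough to wrap the subsequent address arithmetic. The predicate in isolation:

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

static bool cu_len_ok(long len)
{
	return len > 0 && len <= INT_MAX;
}

int main(void)
{
	assert(!cu_len_ok(0));
	assert(!cu_len_ok(-4));		/* underflowed computation */
	assert(cu_len_ok(4096));
	assert(!cu_len_ok((long)INT_MAX + 1));
	return 0;
}

The last assertion assumes long is wider than int, which holds on LP64 ia64.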
+diff -urNp linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c
+--- linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c 2010-09-17 20:12:09.000000000 -0400
+@@ -3,7 +3,7 @@
+ /* Set this to 1 if there is a HW IOMMU in the system */
+ int iommu_detected __read_mostly;
+
+-struct dma_map_ops *dma_ops;
++const struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+@@ -16,7 +16,7 @@ static int __init dma_init(void)
+ }
+ fs_initcall(dma_init);
+
+-struct dma_map_ops *dma_get_ops(struct device *dev)
++const struct dma_map_ops *dma_get_ops(struct device *dev)
+ {
+ return dma_ops;
+ }
+diff -urNp linux-2.6.35.4/arch/ia64/kernel/module.c linux-2.6.35.4/arch/ia64/kernel/module.c
+--- linux-2.6.35.4/arch/ia64/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
+@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
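
From here on the ia64 module loader sees each module as two disjoint regions: module_core_rx/module_init_rx for code (read-only, executable) and module_core_rw/module_init_rw for data (writable, never executable), matching PaX's KERNEXEC split. The in_*() helpers above classify an address with the usual unsigned-subtraction range test; a simplified version of the pattern:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct module_layout {	/* names simplified from the patch */
	uintptr_t core_rx, core_size_rx;	/* text: R-X */
	uintptr_t core_rw, core_size_rw;	/* data: RW- */
};

static bool in_range(uintptr_t base, uintptr_t size, uintptr_t addr)
{
	/* unsigned wraparound makes any addr below base compare huge,
	 * so one compare covers both bounds */
	return addr - base < size;
}

static bool in_core(const struct module_layout *m, uintptr_t addr)
{
	return in_range(m->core_rx, m->core_size_rx, addr) ||
	       in_range(m->core_rw, m->core_size_rw, addr);
}

int main(void)
{
	struct module_layout m = { 0x1000, 0x800, 0x4000, 0x400 };
	printf("%d %d\n", in_core(&m, 0x1100), in_core(&m, 0x5000));
	return 0;
}

Relocation handling (RV_BDREL) then has to subtract whichever region base the symbol actually lives in, which is what the four-way if/else above does.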
+diff -urNp linux-2.6.35.4/arch/ia64/kernel/pci-dma.c linux-2.6.35.4/arch/ia64/kernel/pci-dma.c
+--- linux-2.6.35.4/arch/ia64/kernel/pci-dma.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/kernel/pci-dma.c 2010-09-17 20:12:09.000000000 -0400
+@@ -43,7 +43,7 @@ struct device fallback_dev = {
+ .dma_mask = &fallback_dev.coherent_dma_mask,
+ };
+
+-extern struct dma_map_ops intel_dma_ops;
++extern const struct dma_map_ops intel_dma_ops;
+
+ static int __init pci_iommu_init(void)
+ {
+diff -urNp linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c
+--- linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c 2010-09-17 20:12:09.000000000 -0400
+@@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent
+ return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+ }
+
+-struct dma_map_ops swiotlb_dma_ops = {
++const struct dma_map_ops swiotlb_dma_ops = {
+ .alloc_coherent = ia64_swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+ .map_page = swiotlb_map_page,
+diff -urNp linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c
+--- linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
+diff -urNp linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S
+--- linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S 2010-09-17 20:12:09.000000000 -0400
+@@ -196,7 +196,7 @@ SECTIONS
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
+ * into percpu page size
+ */
+diff -urNp linux-2.6.35.4/arch/ia64/mm/fault.c linux-2.6.35.4/arch/ia64/mm/fault.c
+--- linux-2.6.35.4/arch/ia64/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the
+diff -urNp linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c
+--- linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400
+@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
+diff -urNp linux-2.6.35.4/arch/ia64/mm/init.c linux-2.6.35.4/arch/ia64/mm/init.c
+--- linux-2.6.35.4/arch/ia64/mm/init.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/mm/init.c 2010-09-17 20:12:09.000000000 -0400
+@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff -urNp linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c
+--- linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c 2010-09-17 20:12:09.000000000 -0400
+@@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus *
+ return ret;
+ }
+
+-static struct dma_map_ops sn_dma_ops = {
++static const struct dma_map_ops sn_dma_ops = {
+ .alloc_coherent = sn_dma_alloc_coherent,
+ .free_coherent = sn_dma_free_coherent,
+ .map_page = sn_dma_map_page,
+diff -urNp linux-2.6.35.4/arch/m32r/lib/usercopy.c linux-2.6.35.4/arch/m32r/lib/usercopy.c
+--- linux-2.6.35.4/arch/m32r/lib/usercopy.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/m32r/lib/usercopy.c 2010-09-17 20:12:09.000000000 -0400
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/device.h linux-2.6.35.4/arch/microblaze/include/asm/device.h
+--- linux-2.6.35.4/arch/microblaze/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/microblaze/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400
+@@ -13,7 +13,7 @@ struct device_node;
+
+ struct dev_archdata {
+ /* DMA operations on that device */
+- struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ void *dma_data;
+ };
+
+diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -43,14 +43,14 @@ static inline unsigned long device_to_ma
+ return 0xfffffffful;
+ }
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+ /*
+ * Available generic sets of operations
+ */
+-extern struct dma_map_ops dma_direct_ops;
++extern const struct dma_map_ops dma_direct_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ /* We don't handle the NULL dev case for ISA for now. We could
+ * do it via an out of line call but it is not needed for now. The
+@@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dm
+ return dev->archdata.dma_ops;
+ }
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
+ {
+ dev->archdata.dma_ops = ops;
+ }
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (unlikely(!ops))
+ return 0;
+@@ -87,7 +87,7 @@ static inline int dma_supported(struct d
+
+ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (unlikely(ops == NULL))
+ return -EIO;
+@@ -103,7 +103,7 @@ static inline int dma_set_mask(struct de
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+
+@@ -117,7 +117,7 @@ static inline int dma_mapping_error(stru
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ BUG_ON(!ops);
+@@ -131,7 +131,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/pci.h linux-2.6.35.4/arch/microblaze/include/asm/pci.h
+--- linux-2.6.35.4/arch/microblaze/include/asm/pci.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/microblaze/include/asm/pci.h 2010-09-17 20:12:09.000000000 -0400
+@@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_
+ }
+
+ #ifdef CONFIG_PCI
+-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+-extern struct dma_map_ops *get_pci_dma_ops(void);
++extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
++extern const struct dma_map_ops *get_pci_dma_ops(void);
+ #else /* CONFIG_PCI */
+ #define set_pci_dma_ops(d)
+ #define get_pci_dma_ops() NULL
+diff -urNp linux-2.6.35.4/arch/microblaze/kernel/dma.c linux-2.6.35.4/arch/microblaze/kernel/dma.c
+--- linux-2.6.35.4/arch/microblaze/kernel/dma.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/microblaze/kernel/dma.c 2010-09-17 20:12:09.000000000 -0400
+@@ -133,7 +133,7 @@ static inline void dma_direct_unmap_page
+ __dma_sync_page(dma_address, 0 , size, direction);
+ }
+
+-struct dma_map_ops dma_direct_ops = {
++const struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+diff -urNp linux-2.6.35.4/arch/microblaze/pci/pci-common.c linux-2.6.35.4/arch/microblaze/pci/pci-common.c
+--- linux-2.6.35.4/arch/microblaze/pci/pci-common.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/microblaze/pci/pci-common.c 2010-09-17 20:12:09.000000000 -0400
+@@ -46,14 +46,14 @@ resource_size_t isa_mem_base;
+ /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
+ unsigned int pci_flags;
+
+-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
++static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+
+-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
++void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
+ {
+ pci_dma_ops = dma_ops;
+ }
+
+-struct dma_map_ops *get_pci_dma_ops(void)
++const struct dma_map_ops *get_pci_dma_ops(void)
+ {
+ return pci_dma_ops;
+ }
+diff -urNp linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c
+--- linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -110,7 +110,7 @@ static void db1x_pm_end(void)
+
+ }
+
+-static struct platform_suspend_ops db1x_pm_ops = {
++static const struct platform_suspend_ops db1x_pm_ops = {
+ .valid = suspend_valid_only_mem,
+ .begin = db1x_pm_begin,
+ .enter = db1x_pm_enter,
+diff -urNp linux-2.6.35.4/arch/mips/include/asm/compat.h linux-2.6.35.4/arch/mips/include/asm/compat.h
+--- linux-2.6.35.4/arch/mips/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compa
+ return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ struct pt_regs *regs = (struct pt_regs *)
+ ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
+diff -urNp linux-2.6.35.4/arch/mips/include/asm/elf.h linux-2.6.35.4/arch/mips/include/asm/elf.h
+--- linux-2.6.35.4/arch/mips/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -368,6 +368,13 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+diff -urNp linux-2.6.35.4/arch/mips/include/asm/page.h linux-2.6.35.4/arch/mips/include/asm/page.h
+--- linux-2.6.35.4/arch/mips/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400
+@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff -urNp linux-2.6.35.4/arch/mips/include/asm/system.h linux-2.6.35.4/arch/mips/include/asm/system.h
+--- linux-2.6.35.4/arch/mips/include/asm/system.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/include/asm/system.h 2010-09-17 20:12:09.000000000 -0400
+@@ -234,6 +234,6 @@ extern void per_cpu_trap_init(void);
+ */
+ #define __ARCH_WANT_UNLOCKED_CTXSW
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ALMASK)
+
+ #endif /* _ASM_SYSTEM_H */
+diff -urNp linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c
+--- linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff -urNp linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c
+--- linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ /*
+diff -urNp linux-2.6.35.4/arch/mips/kernel/kgdb.c linux-2.6.35.4/arch/mips/kernel/kgdb.c
+--- linux-2.6.35.4/arch/mips/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
+@@ -270,6 +270,7 @@ int kgdb_arch_handle_exception(int vecto
+ return -1;
+ }
+
++/* cannot be const, see kgdb_arch_init */
+ struct kgdb_arch arch_kgdb_ops;
+
+ /*
+diff -urNp linux-2.6.35.4/arch/mips/kernel/process.c linux-2.6.35.4/arch/mips/kernel/process.c
+--- linux-2.6.35.4/arch/mips/kernel/process.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/kernel/process.c 2010-09-17 20:12:09.000000000 -0400
+@@ -474,15 +474,3 @@ unsigned long get_wchan(struct task_stru
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
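Taken together, the system.h and process.c hunks replace the MIPS-private stack randomization with a pure alignment macro, presumably because PaX's own stack randomization (RANDUSTACK) makes the per-arch sub-page shaving redundant. Condensed side by side, with ALMASK the ABI alignment mask (8 bytes for the 32-bit ABI, 16 bytes for the 64-bit ABI, per the removed comment):

/* before: align and randomize the sub-page offset */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ALMASK;
}

/* after: pure alignment; randomization is applied centrally by PaX */
#define arch_align_stack(x) ((x) & ALMASK)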
+diff -urNp linux-2.6.35.4/arch/mips/kernel/syscall.c linux-2.6.35.4/arch/mips/kernel/syscall.c
+--- linux-2.6.35.4/arch/mips/kernel/syscall.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/kernel/syscall.c 2010-09-17 20:12:09.000000000 -0400
+@@ -106,17 +106,21 @@ unsigned long arch_get_unmapped_area(str
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+ vmm = find_vma(current->mm, addr);
+- if (task_size - len >= addr &&
+- (!vmm || addr + len <= vmm->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ }
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+@@ -126,7 +130,7 @@ unsigned long arch_get_unmapped_area(str
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (task_size - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (do_color_align)
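check_heap_stack_gap() is the substitution made throughout these arch_get_unmapped_area() hunks (alpha, mips, parisc, powerpc, and onward): where the stock kernel accepts any hole that merely ends at vma->vm_start, the patched test additionally refuses holes that would butt up against a downward-growing stack without leaving a configurable guard gap. A sketch of the intended semantics; the authoritative helper is added to include/linux/mm.h by this patch, and sysctl_heap_stack_gap names the tunable it consults:

static inline bool check_heap_stack_gap(const struct vm_area_struct *vma,
					unsigned long addr, unsigned long len)
{
	if (!vma)				/* no vma above: hole is fine */
		return true;
	if (addr + len > vma->vm_start)		/* overlaps the next vma */
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a gap below stacks */
		return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
	return true;
}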
+diff -urNp linux-2.6.35.4/arch/mips/loongson/common/pm.c linux-2.6.35.4/arch/mips/loongson/common/pm.c
+--- linux-2.6.35.4/arch/mips/loongson/common/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/loongson/common/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspe
+ }
+ }
+
+-static struct platform_suspend_ops loongson_pm_ops = {
++static const struct platform_suspend_ops loongson_pm_ops = {
+ .valid = loongson_pm_valid_state,
+ .enter = loongson_pm_enter,
+ };
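The many "static struct ... ops" to "static const struct ... ops" conversions in this patch (platform_suspend_ops, dma_map_ops, kgdb_arch, and so on) are one idea applied repeatedly: a table of function pointers that is fully initialized at compile time and never legitimately modified belongs in read-only memory, where a kernel-write primitive can no longer swap a callback for attacker-controlled code. The pattern in miniature:

/* constify pattern: the ops table lands in .rodata, so any runtime
 * write faults instead of silently redirecting a callback */
struct ops {
	int (*valid)(int state);
	int (*enter)(int state);
};

static int my_valid(int state) { return state == 1; }
static int my_enter(int state) { return 0; }

static const struct ops my_ops = {
	.valid = my_valid,
	.enter = my_enter,
};

Tables that must be patched at runtime keep their non-const type and gain a "cannot be const" comment instead, as with the mips arch_kgdb_ops and powerpc dma_iommu_ops hunks.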
+diff -urNp linux-2.6.35.4/arch/mips/mm/fault.c linux-2.6.35.4/arch/mips/mm/fault.c
+--- linux-2.6.35.4/arch/mips/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/mips/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
+@@ -26,6 +26,23 @@
+ #include <asm/ptrace.h>
+ #include <asm/highmem.h> /* For VMALLOC_END */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -urNp linux-2.6.35.4/arch/parisc/include/asm/compat.h linux-2.6.35.4/arch/parisc/include/asm/compat.h
+--- linux-2.6.35.4/arch/parisc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/parisc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compa
+ return (u32)(unsigned long)uptr;
+ }
+
+-static __inline__ void __user *compat_alloc_user_space(long len)
++static __inline__ void __user *arch_compat_alloc_user_space(long len)
+ {
+ struct pt_regs *regs = &current->thread.regs;
+ return (void __user *)regs->gr[30];
+diff -urNp linux-2.6.35.4/arch/parisc/include/asm/elf.h linux-2.6.35.4/arch/parisc/include/asm/elf.h
+--- linux-2.6.35.4/arch/parisc/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/parisc/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -urNp linux-2.6.35.4/arch/parisc/include/asm/pgtable.h linux-2.6.35.4/arch/parisc/include/asm/pgtable.h
+--- linux-2.6.35.4/arch/parisc/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/parisc/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
+@@ -207,6 +207,17 @@
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff -urNp linux-2.6.35.4/arch/parisc/kernel/module.c linux-2.6.35.4/arch/parisc/kernel/module.c
+--- linux-2.6.35.4/arch/parisc/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/parisc/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
+@@ -96,16 +96,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
+-
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
++
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
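These parisc module.c hunks presuppose the struct module split made by the generic part of this patch: the single module_core/core_size pair becomes separate RX (text) and RW (data) regions, which is why the GOT and fdesc tables, being writable, are now accounted against core_size_rw and addressed via module_core_rw. The fields the in_*_rx/rw() helpers above rely on, as a sketch (see the include/linux/module.h hunk for the authoritative definitions):

struct module {
	/* ... */
	void *module_init_rx, *module_init_rw;	/* init text / init data */
	void *module_core_rx, *module_core_rw;	/* core text / core data */
	unsigned int init_size_rx, init_size_rw;
	unsigned int core_size_rx, core_size_rw;
	/* ... */
};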
+diff -urNp linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c
+--- linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c 2010-09-17 20:12:09.000000000 -0400
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+diff -urNp linux-2.6.35.4/arch/parisc/kernel/traps.c linux-2.6.35.4/arch/parisc/kernel/traps.c
+--- linux-2.6.35.4/arch/parisc/kernel/traps.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/parisc/kernel/traps.c 2010-09-17 20:12:09.000000000 -0400
+@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -urNp linux-2.6.35.4/arch/parisc/mm/fault.c linux-2.6.35.4/arch/parisc/mm/fault.c
+--- linux-2.6.35.4/arch/parisc/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/parisc/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -192,8 +303,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
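pax_handle_fetch_fault() above is built entirely on opcode sniffing: get_user() fetches candidate instruction words at the faulting PC (failing gracefully if the page is unmapped), and the words are compared against the fixed parisc encodings of an unpatched PLT stub (return 3, redirect to the dynamic linker's resolver) or the rt_sigreturn trampoline (return 2, emulate it in the kernel); everything else is a genuine execution attempt in a non-executable mapping (return 1, report and kill). The idiom, reduced to its skeleton with placeholder encodings:

/* opcode-sniffing skeleton; KNOWN_OPCODE* stand in for the exact
 * constants matched in the real handler above */
unsigned int insn0, insn1;
int err;

err  = get_user(insn0, (unsigned int __user *)instruction_pointer(regs));
err |= get_user(insn1, (unsigned int __user *)(instruction_pointer(regs) + 4));

if (!err && insn0 == KNOWN_OPCODE0 && insn1 == KNOWN_OPCODE1) {
	/* recognized trampoline: adjust regs to emulate it and resume */
}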
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/compat.h linux-2.6.35.4/arch/powerpc/include/asm/compat.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compa
+ return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ struct pt_regs *regs = current->thread.regs;
+ unsigned long usp = regs->gpr[1];
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/device.h linux-2.6.35.4/arch/powerpc/include/asm/device.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400
+@@ -11,7 +11,7 @@ struct device_node;
+
+ struct dev_archdata {
+ /* DMA operations on that device */
+- struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+
+ /*
+ * When an iommu is in use, dma_data is used as a ptr to the base of the
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -66,12 +66,13 @@ static inline unsigned long device_to_ma
+ /*
+ * Available generic sets of operations
+ */
++/* cannot be const */
+ #ifdef CONFIG_PPC64
+ extern struct dma_map_ops dma_iommu_ops;
+ #endif
+-extern struct dma_map_ops dma_direct_ops;
++extern const struct dma_map_ops dma_direct_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ /* We don't handle the NULL dev case for ISA for now. We could
+ * do it via an out of line call but it is not needed for now. The
+@@ -84,7 +85,7 @@ static inline struct dma_map_ops *get_dm
+ return dev->archdata.dma_ops;
+ }
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
+ {
+ dev->archdata.dma_ops = ops;
+ }
+@@ -118,7 +119,7 @@ static inline void set_dma_offset(struct
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return 0;
+@@ -129,7 +130,7 @@ static inline int dma_supported(struct d
+
+ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return -EIO;
+@@ -144,7 +145,7 @@ static inline int dma_set_mask(struct de
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!dma_ops);
+@@ -159,7 +160,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+
+@@ -170,7 +171,7 @@ static inline void dma_free_coherent(str
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/elf.h linux-2.6.35.4/arch/powerpc/include/asm/elf.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* __KERNEL__ */
+
+ /*
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/iommu.h linux-2.6.35.4/arch/powerpc/include/asm/iommu.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/iommu.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/iommu.h 2010-09-17 20:12:09.000000000 -0400
+@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
+ extern void iommu_init_early_dart(void);
+ extern void iommu_init_early_pasemi(void);
+
++/* dma-iommu.c */
++extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
++
+ #ifdef CONFIG_PCI
+ extern void pci_iommu_init(void);
+ extern void pci_direct_iommu_init(void);
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -27,6 +27,7 @@ enum km_type {
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
+ KM_KDB,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/page_64.h linux-2.6.35.4/arch/powerpc/include/asm/page_64.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/page_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/page_64.h 2010-09-17 20:12:09.000000000 -0400
+@@ -172,15 +172,18 @@ do { \
+ * stack by default, so in the absense of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/page.h linux-2.6.35.4/arch/powerpc/include/asm/page.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400
+@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ #undef STRICT_MM_TYPECHECKS
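The page.h and page_64.h hunks change the default VM flags so that stacks and data segments are executable only when the ELF personality explicitly asks for it (READ_IMPLIES_EXEC) rather than unconditionally; the identity macros ktla_ktva()/ktva_ktla() satisfy KERNEXEC's kernel-text address-translation hooks on an architecture that doesn't relocate its text. The flag change, restated with a hypothetical helper macro for readability (MAYBE_EXEC is an illustrative name, not from the patch):

/* exec on stack/heap now follows the personality (sketch) */
#define MAYBE_EXEC \
	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_DEFAULT_FLAGS32 \
	(MAYBE_EXEC | VM_READ | VM_WRITE | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)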
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/pci.h linux-2.6.35.4/arch/powerpc/include/asm/pci.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/pci.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/pci.h 2010-09-17 20:12:09.000000000 -0400
+@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
+ }
+
+ #ifdef CONFIG_PCI
+-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+-extern struct dma_map_ops *get_pci_dma_ops(void);
++extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
++extern const struct dma_map_ops *get_pci_dma_ops(void);
+ #else /* CONFIG_PCI */
+ #define set_pci_dma_ops(d)
+ #define get_pci_dma_ops() NULL
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_EXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/reg.h linux-2.6.35.4/arch/powerpc/include/asm/reg.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/reg.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/reg.h 2010-09-17 20:12:09.000000000 -0400
+@@ -191,6 +191,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h 2010-09-17 20:12:09.000000000 -0400
+@@ -13,7 +13,7 @@
+
+ #include <linux/swiotlb.h>
+
+-extern struct dma_map_ops swiotlb_dma_ops;
++extern const struct dma_map_ops swiotlb_dma_ops;
+
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h
+--- linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
+@@ -13,6 +13,8 @@
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -327,52 +329,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
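Two independent hardenings are folded into the reworked powerpc copy routines above. First, a signedness guard: (long)n < 0 (plus n > INT_MAX on 64-bit) rejects a length that went negative upstream and was converted to a huge unsigned count, before it can drive the copy loop. Second, check_object_size() — the PAX_USERCOPY check declared at the top of this uaccess.h hunk — verifies that a copy with a non-compile-time-constant length stays inside a single slab object or the current stack frame. The signedness guard in isolation, as a self-contained illustration:

/* standalone demo of the (long)n < 0 guard: an unsigned length
 * produced from a negative value is astronomically large; refuse it
 * up front and report "n bytes not copied", the uaccess convention */
#include <stdio.h>

static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* nothing copied */
	/* ... copy n bytes ... */
	return 0;		/* all bytes copied */
}

int main(void)
{
	unsigned long bad = (unsigned long)-12;	/* bogus user length */
	printf("%s\n", guarded_copy(bad) ? "rejected" : "copied");
	return 0;
}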
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma.c linux-2.6.35.4/arch/powerpc/kernel/dma.c
+--- linux-2.6.35.4/arch/powerpc/kernel/dma.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/dma.c 2010-09-17 20:12:09.000000000 -0400
+@@ -135,7 +135,7 @@ static inline void dma_direct_sync_singl
+ }
+ #endif
+
+-struct dma_map_ops dma_direct_ops = {
++const struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c
+--- linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
+ }
+
+ /* We support DMA to/from any memory page via the iommu */
+-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
++int dma_iommu_dma_supported(struct device *dev, u64 mask)
+ {
+ struct iommu_table *tbl = get_iommu_table_base(dev);
+
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c
+--- linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c 2010-09-17 20:12:09.000000000 -0400
+@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
+ * map_page, and unmap_page on highmem, use normal dma_ops
+ * for everything else.
+ */
+-struct dma_map_ops swiotlb_dma_ops = {
++const struct dma_map_ops swiotlb_dma_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S
+--- linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S 2010-09-17 20:12:09.000000000 -0400
+@@ -455,6 +455,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -464,8 +465,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b .ret_from_except_lite
+-1: bl .save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .bad_page_fault
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S
+--- linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S 2010-09-17 20:12:09.000000000 -0400
+@@ -840,10 +840,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 13f
+- bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c
+--- linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c 2010-09-17 20:12:09.000000000 -0400
+@@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct
+ return 1;
+ }
+
+-static struct dma_map_ops ibmebus_dma_ops = {
++static const struct dma_map_ops ibmebus_dma_ops = {
+ .alloc_coherent = ibmebus_alloc_coherent,
+ .free_coherent = ibmebus_free_coherent,
+ .map_sg = ibmebus_map_sg,
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/kgdb.c linux-2.6.35.4/arch/powerpc/kernel/kgdb.c
+--- linux-2.6.35.4/arch/powerpc/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
+@@ -128,7 +128,7 @@ static int kgdb_handle_breakpoint(struct
+ if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
+ return 0;
+
+- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
++ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+ regs->nip += 4;
+
+ return 1;
+@@ -360,7 +360,7 @@ int kgdb_arch_handle_exception(int vecto
+ /*
+ * Global data
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+ };
+
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/module_32.c linux-2.6.35.4/arch/powerpc/kernel/module_32.c
+--- linux-2.6.35.4/arch/powerpc/kernel/module_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/module_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/module.c linux-2.6.35.4/arch/powerpc/kernel/module.c
+--- linux-2.6.35.4/arch/powerpc/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
+@@ -31,11 +31,24 @@
+
+ LIST_HEAD(module_bug_list);
+
++#ifdef CONFIG_PAX_KERNEXEC
+ void *module_alloc(unsigned long size)
+ {
+ if (size == 0)
+ return NULL;
+
++ return vmalloc(size);
++}
++
++void *module_alloc_exec(unsigned long size)
++#else
++void *module_alloc(unsigned long size)
++#endif
++
++{
++ if (size == 0)
++ return NULL;
++
+ return vmalloc_exec(size);
+ }
+
+@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
+ vfree(module_region);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++#endif
++
+ static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
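The #ifdef arrangement in the module_alloc() hunk above is compact to the point of obscurity; read flat, it means: with PAX_KERNEXEC enabled, module_alloc() now returns plain non-executable vmalloc() memory (the RW region) and the new module_alloc_exec() returns vmalloc_exec() memory (the RX region), while without KERNEXEC module_alloc() keeps its historical vmalloc_exec() body. A de-obfuscated equivalent:

#ifdef CONFIG_PAX_KERNEXEC
void *module_alloc(unsigned long size)		/* RW data arena */
{
	return size ? vmalloc(size) : NULL;
}

void *module_alloc_exec(unsigned long size)	/* RX text arena */
{
	return size ? vmalloc_exec(size) : NULL;
}
#else
void *module_alloc(unsigned long size)		/* single executable arena */
{
	return size ? vmalloc_exec(size) : NULL;
}
#endif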
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/pci-common.c linux-2.6.35.4/arch/powerpc/kernel/pci-common.c
+--- linux-2.6.35.4/arch/powerpc/kernel/pci-common.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/pci-common.c 2010-09-17 20:12:09.000000000 -0400
+@@ -51,14 +51,14 @@ resource_size_t isa_mem_base;
+ unsigned int ppc_pci_flags = 0;
+
+
+-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
++static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+
+-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
++void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
+ {
+ pci_dma_ops = dma_ops;
+ }
+
+-struct dma_map_ops *get_pci_dma_ops(void)
++const struct dma_map_ops *get_pci_dma_ops(void)
+ {
+ return pci_dma_ops;
+ }
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/process.c linux-2.6.35.4/arch/powerpc/kernel/process.c
+--- linux-2.6.35.4/arch/powerpc/kernel/process.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/process.c 2010-09-17 20:12:09.000000000 -0400
+@@ -1215,51 +1215,3 @@ unsigned long arch_align_stack(unsigned
+ sp -= get_random_int() & ~PAGE_MASK;
+ return sp & ~0xf;
+ }
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+- /*
+- * If we are using 1TB segments and we are allowed to randomise
+- * the heap, we can put it above 1TB so it is backed by a 1TB
+- * segment. Otherwise the heap will be in the bottom 1TB
+- * which always uses 256MB segments and this may result in a
+- * performance penalty.
+- */
+- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/signal_32.c linux-2.6.35.4/arch/powerpc/kernel/signal_32.c
+--- linux-2.6.35.4/arch/powerpc/kernel/signal_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/signal_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/signal_64.c linux-2.6.35.4/arch/powerpc/kernel/signal_64.c
+--- linux-2.6.35.4/arch/powerpc/kernel/signal_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/signal_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/vdso.c linux-2.6.35.4/arch/powerpc/kernel/vdso.c
+--- linux-2.6.35.4/arch/powerpc/kernel/vdso.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/vdso.c 2010-09-17 20:12:09.000000000 -0400
+@@ -36,6 +36,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+
+ #include "setup.h"
+
+@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
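The 0 to ~0UL sentinel change here pairs with the signal_32.c and signal_64.c hunks above: once RANDMMAP is in play, the kernel no longer wants to rely on vdso_base == 0 as proof that no vDSO is mapped, so an impossible base, ~0UL, becomes the "absent" marker and the signal-trampoline paths test against it. The convention in miniature (names are local to this example):

#include <stdio.h>

#define VDSO_ABSENT (~0UL)	/* can never be a real mapping base */

int main(void)
{
	unsigned long vdso_base = VDSO_ABSENT;

	if (vdso_base != VDSO_ABSENT)
		printf("signal trampoline in vDSO at %#lx\n", vdso_base);
	else
		puts("fall back to a non-vDSO trampoline");
	return 0;
}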
+diff -urNp linux-2.6.35.4/arch/powerpc/kernel/vio.c linux-2.6.35.4/arch/powerpc/kernel/vio.c
+--- linux-2.6.35.4/arch/powerpc/kernel/vio.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/kernel/vio.c 2010-09-17 20:12:09.000000000 -0400
+@@ -602,11 +602,12 @@ static void vio_dma_iommu_unmap_sg(struc
+ vio_cmo_dealloc(viodev, alloc_size);
+ }
+
+-struct dma_map_ops vio_dma_mapping_ops = {
++static const struct dma_map_ops vio_dma_mapping_ops = {
+ .alloc_coherent = vio_dma_iommu_alloc_coherent,
+ .free_coherent = vio_dma_iommu_free_coherent,
+ .map_sg = vio_dma_iommu_map_sg,
+ .unmap_sg = vio_dma_iommu_unmap_sg,
++ .dma_supported = dma_iommu_dma_supported,
+ .map_page = vio_dma_iommu_map_page,
+ .unmap_page = vio_dma_iommu_unmap_page,
+
+@@ -860,7 +861,6 @@ static void vio_cmo_bus_remove(struct vi
+
+ static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
+ {
+- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
+ viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
+ }
+
+diff -urNp linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c
+--- linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff -urNp linux-2.6.35.4/arch/powerpc/mm/fault.c linux-2.6.35.4/arch/powerpc/mm/fault.c
+--- linux-2.6.35.4/arch/powerpc/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
+@@ -30,6 +30,10 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/perf_event.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -41,6 +45,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/siginfo.h>
+ #include <mm/mmu_decl.h>
++#include <asm/ptrace.h>
+
+ #ifdef CONFIG_KPROBES
+ static inline int notify_page_fault(struct pt_regs *regs)
+@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -257,7 +289,7 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -272,7 +304,7 @@ good_area:
+ * processors use the same I/D cache coherency mechanism
+ * as embedded.
+ */
+- if (error_code & DSISR_PROTFAULT)
++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+ goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+
+@@ -341,6 +373,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
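Classic (hash-MMU) PowerPC offers no true no-execute PTE bit here, so PAGEEXEC is built on guarded storage: the pte-hash32.h hunk tracks _PAGE_EXEC via the _PAGE_GUARDED bit, an instruction fetch that trips the guard faults with the 0x10000000 status bit set (named DSISR_GUARDED by the reg.h hunk), and do_page_fault() above widens its trap-0x400 mask from 0x48200000 to 0x58200000 so that bit survives into the access check and the PAGEEXEC handler. The mask arithmetic, checked:

#include <assert.h>

#define DSISR_NOHPTE	0x40000000	/* no translation found */
#define DSISR_GUARDED	0x10000000	/* fetch from guarded storage */
#define DSISR_PROTFAULT	0x08000000	/* protection fault */

int main(void)
{
	/* the widened mask is exactly the old mask plus the guard bit */
	assert((0x48200000u | DSISR_GUARDED) == 0x58200000u);
	/* and it still passes the translation/protection bits through */
	assert((0x58200000u & (DSISR_NOHPTE | DSISR_PROTFAULT)) ==
	       (DSISR_NOHPTE | DSISR_PROTFAULT));
	return 0;
}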
+diff -urNp linux-2.6.35.4/arch/powerpc/mm/mmap_64.c linux-2.6.35.4/arch/powerpc/mm/mmap_64.c
+--- linux-2.6.35.4/arch/powerpc/mm/mmap_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/mm/mmap_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
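Note the asymmetry in how the two layouts apply the exec-time randomization once MF_PAX_RANDMMAP is set: the legacy bottom-up layout shifts mmap_base up by delta_mmap, while the top-down layout shifts it down by delta_mmap + delta_stack, because in that layout mappings grow down from just below the stack and must also clear the stack's own randomized headroom. Condensed (the MF_PAX_RANDMMAP checks elided):

/* condensed restatement of the hunk above, assuming RANDMMAP is on */
if (mmap_is_legacy()) {
	/* mappings grow up from the bottom: push the base up */
	mm->mmap_base = TASK_UNMAPPED_BASE + mm->delta_mmap;
	mm->get_unmapped_area = arch_get_unmapped_area;
} else {
	/* mappings grow down from below the stack: pull the base down */
	mm->mmap_base = mmap_base() - mm->delta_mmap - mm->delta_stack;
	mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}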
+diff -urNp linux-2.6.35.4/arch/powerpc/mm/slice.c linux-2.6.35.4/arch/powerpc/mm/slice.c
+--- linux-2.6.35.4/arch/powerpc/mm/slice.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/mm/slice.c 2010-09-17 20:12:09.000000000 -0400
+@@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len);
+ }
+
+-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+ {
+ return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
+ 1ul << SLICE_LOW_SHIFT);
+@@ -256,7 +255,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -336,7 +335,7 @@ static unsigned long slice_find_area_top
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un
+ if (fixed && addr > (mm->task_size - len))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+diff -urNp linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c
+--- linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
+ lite5200_pm_target_state = PM_SUSPEND_ON;
+ }
+
+-static struct platform_suspend_ops lite5200_pm_ops = {
++static const struct platform_suspend_ops lite5200_pm_ops = {
+ .valid = lite5200_pm_valid,
+ .begin = lite5200_pm_begin,
+ .prepare = lite5200_pm_prepare,
+diff -urNp linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+--- linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -189,7 +189,7 @@ void mpc52xx_pm_finish(void)
+ iounmap(mbar);
+ }
+
+-static struct platform_suspend_ops mpc52xx_pm_ops = {
++static const struct platform_suspend_ops mpc52xx_pm_ops = {
+ .valid = mpc52xx_pm_valid,
+ .prepare = mpc52xx_pm_prepare,
+ .enter = mpc52xx_pm_enter,
+diff -urNp linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c
+--- linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c 2010-09-17 20:12:09.000000000 -0400
+@@ -311,7 +311,7 @@ static int mpc83xx_is_pci_agent(void)
+ return ret;
+ }
+
+-static struct platform_suspend_ops mpc83xx_suspend_ops = {
++static const struct platform_suspend_ops mpc83xx_suspend_ops = {
+ .valid = mpc83xx_suspend_valid,
+ .begin = mpc83xx_suspend_begin,
+ .enter = mpc83xx_suspend_enter,
+diff -urNp linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c
+--- linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
+
+ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+
+-struct dma_map_ops dma_iommu_fixed_ops = {
++const struct dma_map_ops dma_iommu_fixed_ops = {
+ .alloc_coherent = dma_fixed_alloc_coherent,
+ .free_coherent = dma_fixed_free_coherent,
+ .map_sg = dma_fixed_map_sg,
+diff -urNp linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c
+--- linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c 2010-09-17 20:12:09.000000000 -0400
+@@ -695,7 +695,7 @@ static int ps3_dma_supported(struct devi
+ return mask >= DMA_BIT_MASK(32);
+ }
+
+-static struct dma_map_ops ps3_sb_dma_ops = {
++static const struct dma_map_ops ps3_sb_dma_ops = {
+ .alloc_coherent = ps3_alloc_coherent,
+ .free_coherent = ps3_free_coherent,
+ .map_sg = ps3_sb_map_sg,
+@@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops
+ .unmap_page = ps3_unmap_page,
+ };
+
+-static struct dma_map_ops ps3_ioc0_dma_ops = {
++static const struct dma_map_ops ps3_ioc0_dma_ops = {
+ .alloc_coherent = ps3_alloc_coherent,
+ .free_coherent = ps3_free_coherent,
+ .map_sg = ps3_ioc0_map_sg,
+diff -urNp linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c
+--- linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c 2010-09-17 20:12:09.000000000 -0400
+@@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_sta
+ return 1;
+ }
+
+-static struct platform_suspend_ops pmc_suspend_ops = {
++static const struct platform_suspend_ops pmc_suspend_ops = {
+ .valid = pmc_suspend_valid,
+ .enter = pmc_suspend_enter,
+ };
+diff -urNp linux-2.6.35.4/arch/s390/include/asm/compat.h linux-2.6.35.4/arch/s390/include/asm/compat.h
+--- linux-2.6.35.4/arch/s390/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -181,7 +181,7 @@ static inline int is_compat_task(void)
+
+ #endif
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ unsigned long stack;
+
+diff -urNp linux-2.6.35.4/arch/s390/include/asm/elf.h linux-2.6.35.4/arch/s390/include/asm/elf.h
+--- linux-2.6.35.4/arch/s390/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -163,6 +163,13 @@ extern unsigned int vdso_enabled;
+ that it will "exec", and that there is sufficient room for the brk. */
+ #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+
+diff -urNp linux-2.6.35.4/arch/s390/include/asm/uaccess.h linux-2.6.35.4/arch/s390/include/asm/uaccess.h
+--- linux-2.6.35.4/arch/s390/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
+@@ -234,6 +234,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
+ unsigned int sz = __compiletime_object_size(to);
+
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
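
The (long)n < 0 guard added to every copy routine rejects a length whose top bit is set before it reaches the low-level copier: such a value is almost always a signed error code or negative computation that was implicitly converted to a huge unsigned count, which would otherwise let a usercopy sweep through kernel memory. Returning n follows the copy_*_user convention of reporting bytes not copied. A standalone illustration (the wrapper name is illustrative):

    /* Sketch: on 64-bit, a length that is negative as a signed long is an
     * unsigned value >= 2^63 - no legitimate copy is that large, so refuse
     * it up front and report the whole length as uncopied. */
    static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
    {
            if ((long)n < 0)
                    return n;       /* nothing copied */
            /* ... proceed to the real access_ok() + copy ... */
            return 0;
    }
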
+diff -urNp linux-2.6.35.4/arch/s390/Kconfig linux-2.6.35.4/arch/s390/Kconfig
+--- linux-2.6.35.4/arch/s390/Kconfig 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/Kconfig 2010-09-17 20:12:09.000000000 -0400
+@@ -230,13 +230,12 @@ config AUDIT_ARCH
+
+ config S390_EXEC_PROTECT
+ bool "Data execute protection"
++ default y
+ help
+ This option allows to enable a buffer overflow protection for user
+- space programs and it also selects the addressing mode option above.
+- The kernel parameter noexec=on will enable this feature and also
+- switch the addressing modes, default is disabled. Enabling this (via
+- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
+- will reduce system performance.
++ space programs.
++ Enabling this on machines earlier than IBM System z9-109 EC/BC will
++ reduce system performance.
+
+ comment "Code generation options"
+
+diff -urNp linux-2.6.35.4/arch/s390/kernel/module.c linux-2.6.35.4/arch/s390/kernel/module.c
+--- linux-2.6.35.4/arch/s390/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
+@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
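
The module loader changes implement the PaX split of a module image into a writable (RW) and an executable (RX) region: the GOT is data and is carved out of the RW half, while the PLT holds generated code and is carved out of the RX half, so no module page needs to be writable and executable at once. The accounting mirrors the hunk above (the struct below is a distilled sketch, not the real struct module):

    struct mod_layout {
            unsigned long core_size_rw, core_size_rx;
            unsigned long got_offset, plt_offset;
    };

    static void reserve_got_plt(struct mod_layout *m,
                                unsigned long got_size, unsigned long plt_size)
    {
            m->core_size_rw = ALIGN(m->core_size_rw, 4);
            m->got_offset   = m->core_size_rw;      /* GOT is data: RW half */
            m->core_size_rw += got_size;
            m->plt_offset   = m->core_size_rx;      /* PLT is code: RX half */
            m->core_size_rx += plt_size;
    }
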
+diff -urNp linux-2.6.35.4/arch/s390/kernel/setup.c linux-2.6.35.4/arch/s390/kernel/setup.c
+--- linux-2.6.35.4/arch/s390/kernel/setup.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/kernel/setup.c 2010-09-17 20:12:09.000000000 -0400
+@@ -281,7 +281,7 @@ static int __init early_parse_mem(char *
+ }
+ early_param("mem", early_parse_mem);
+
+-unsigned int user_mode = HOME_SPACE_MODE;
++unsigned int user_mode = SECONDARY_SPACE_MODE;
+ EXPORT_SYMBOL_GPL(user_mode);
+
+ static int set_amode_and_uaccess(unsigned long user_amode,
+@@ -310,17 +310,6 @@ static int set_amode_and_uaccess(unsigne
+ }
+ }
+
+-/*
+- * Switch kernel/user addressing modes?
+- */
+-static int __init early_parse_switch_amode(char *p)
+-{
+- if (user_mode != SECONDARY_SPACE_MODE)
+- user_mode = PRIMARY_SPACE_MODE;
+- return 0;
+-}
+-early_param("switch_amode", early_parse_switch_amode);
+-
+ static int __init early_parse_user_mode(char *p)
+ {
+ if (p && strcmp(p, "primary") == 0)
+@@ -337,20 +326,6 @@ static int __init early_parse_user_mode(
+ }
+ early_param("user_mode", early_parse_user_mode);
+
+-#ifdef CONFIG_S390_EXEC_PROTECT
+-/*
+- * Enable execute protection?
+- */
+-static int __init early_parse_noexec(char *p)
+-{
+- if (!strncmp(p, "off", 3))
+- return 0;
+- user_mode = SECONDARY_SPACE_MODE;
+- return 0;
+-}
+-early_param("noexec", early_parse_noexec);
+-#endif /* CONFIG_S390_EXEC_PROTECT */
+-
+ static void setup_addressing_mode(void)
+ {
+ if (user_mode == SECONDARY_SPACE_MODE) {
+diff -urNp linux-2.6.35.4/arch/s390/mm/maccess.c linux-2.6.35.4/arch/s390/mm/maccess.c
+--- linux-2.6.35.4/arch/s390/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400
+@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void
+ return rc ? rc : count;
+ }
+
+-long probe_kernel_write(void *dst, void *src, size_t size)
++long probe_kernel_write(void *dst, const void *src, size_t size)
+ {
+ long copied = 0;
+
+diff -urNp linux-2.6.35.4/arch/s390/mm/mmap.c linux-2.6.35.4/arch/s390/mm/mmap.c
+--- linux-2.6.35.4/arch/s390/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/s390/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400
+@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
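
With CONFIG_PAX_RANDMMAP the random delta is applied in opposite directions depending on the layout: the legacy bottom-up layout pushes the base up from TASK_UNMAPPED_BASE, while the top-down layout pulls it down, leaving room both for the mmap randomization and for the randomized stack above it. Distilled (the helper name and parameters are illustrative):

    static unsigned long pick_mmap_base(int legacy, unsigned long low_base,
                                        unsigned long high_base,
                                        unsigned long delta_mmap,
                                        unsigned long delta_stack)
    {
            if (legacy)
                    return low_base + delta_mmap;             /* grows upward */
            return high_base - (delta_mmap + delta_stack);    /* grows downward */
    }
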
+diff -urNp linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c
+--- linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
+ return 0;
+ }
+
+-static struct platform_suspend_ops hp6x0_pm_ops = {
++static const struct platform_suspend_ops hp6x0_pm_ops = {
+ .enter = hp6x0_pm_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff -urNp linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -1,10 +1,10 @@
+ #ifndef __ASM_SH_DMA_MAPPING_H
+ #define __ASM_SH_DMA_MAPPING_H
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+ extern void no_iommu_init(void);
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ return dma_ops;
+ }
+@@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dm
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->dma_supported)
+ return ops->dma_supported(dev, mask);
+@@ -24,7 +24,7 @@ static inline int dma_supported(struct d
+
+ static inline int dma_set_mask(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+@@ -59,7 +59,7 @@ static inline int dma_get_cache_alignmen
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+@@ -70,7 +70,7 @@ static inline int dma_mapping_error(stru
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+@@ -87,7 +87,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (dma_release_from_coherent(dev, get_order(size), vaddr))
+ return;
+diff -urNp linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c
+--- linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c 2010-09-17 20:12:09.000000000 -0400
+@@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t s
+ return 0;
+ }
+
+-static struct platform_suspend_ops sh_pm_ops = {
++static const struct platform_suspend_ops sh_pm_ops = {
+ .enter = sh_pm_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff -urNp linux-2.6.35.4/arch/sh/kernel/dma-nommu.c linux-2.6.35.4/arch/sh/kernel/dma-nommu.c
+--- linux-2.6.35.4/arch/sh/kernel/dma-nommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sh/kernel/dma-nommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device
+ }
+ #endif
+
+-struct dma_map_ops nommu_dma_ops = {
++const struct dma_map_ops nommu_dma_ops = {
+ .alloc_coherent = dma_generic_alloc_coherent,
+ .free_coherent = dma_generic_free_coherent,
+ .map_page = nommu_map_page,
+diff -urNp linux-2.6.35.4/arch/sh/kernel/kgdb.c linux-2.6.35.4/arch/sh/kernel/kgdb.c
+--- linux-2.6.35.4/arch/sh/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sh/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
+@@ -319,7 +319,7 @@ void kgdb_arch_exit(void)
+ unregister_die_notifier(&kgdb_notifier);
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: trapa #0x3c */
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ .gdb_bpt_instr = { 0x3c, 0xc3 },
+diff -urNp linux-2.6.35.4/arch/sh/mm/consistent.c linux-2.6.35.4/arch/sh/mm/consistent.c
+--- linux-2.6.35.4/arch/sh/mm/consistent.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sh/mm/consistent.c 2010-09-17 20:12:09.000000000 -0400
+@@ -22,7 +22,7 @@
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+-struct dma_map_ops *dma_ops;
++const struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ static int __init dma_init(void)
+diff -urNp linux-2.6.35.4/arch/sh/mm/mmap.c linux-2.6.35.4/arch/sh/mm/mmap.c
+--- linux-2.6.35.4/arch/sh/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sh/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -106,7 +105,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
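
Throughout the arch allocators, the open-coded "!vma || addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), which additionally refuses candidates that would end flush against a downward-growing (stack) VMA, enforcing a guard gap between heap and stack. A hedged sketch of what the helper enforces (its real body is defined elsewhere in this patch; the 64 KiB gap is an assumption):

    static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long len)
    {
            unsigned long gap = 64UL << 10;         /* assumed default */

            if (!vma)
                    return true;                    /* nothing above us */
            if (addr + len > vma->vm_start)
                    return false;                   /* overlaps next VMA */
            if (vma->vm_flags & VM_GROWSDOWN)       /* stack: keep a gap */
                    return addr + len + gap <= vma->vm_start;
            return true;
    }
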
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h
+--- linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h 2010-09-17 20:12:09.000000000 -0400
+@@ -14,18 +14,40 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+ #define atomic64_set(v, i) (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(long, atomic64_t *);
++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(long, atomic64_t *);
++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
+ extern long atomic64_add_ret(long, atomic64_t *);
++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_sub_ret(int, atomic_t *);
+ extern long atomic64_sub_ret(long, atomic64_t *);
+
+@@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
+ #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+
+ #define atomic_inc_return(v) atomic_add_ret(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_ret(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(1, v);
++}
+
+ #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+ #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+@@ -59,10 +89,26 @@ extern long atomic64_sub_ret(long, atomi
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+@@ -72,17 +118,28 @@ extern long atomic64_sub_ret(long, atomi
+
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+@@ -93,17 +150,28 @@ static inline int atomic_add_unless(atom
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
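
This is the sparc side of PAX_REFCOUNT: every checked atomic op uses the condition-code-setting addcc/subcc followed by "tvs %icc, 6" (%xcc for 64-bit values), i.e. trap-on-overflow into software trap 6, so a wrapping reference count traps instead of silently going negative; the new _unchecked variants keep plain add/sub for counters that may legitimately wrap, such as statistics. The same pattern, lifted out as a self-contained C helper (illustrative):

    /* Sketch (SPARC): signed add that traps on overflow. addcc sets the
     * integer condition codes; tvs fires software trap 6 when the overflow
     * bit is set, which the trap handlers below route to
     * pax_report_refcount_overflow(). */
    static inline int add_trap_on_overflow(int a, int b)
    {
            int r;

            asm volatile("addcc %1, %2, %0\n\t"
                         "tvs   %%icc, 6\n"
                         : "=r" (r)
                         : "r" (a), "r" (b)
                         : "cc");
            return r;
    }
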
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/compat.h linux-2.6.35.4/arch/sparc/include/asm/compat.h
+--- linux-2.6.35.4/arch/sparc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compa
+ return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ struct pt_regs *regs = current_thread_info()->kregs;
+ unsigned long usp = regs->u_regs[UREG_I6];
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -13,10 +13,10 @@ extern int dma_supported(struct device *
+ #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+ #define dma_is_consistent(d, h) (1)
+
+-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
++extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
+ extern struct bus_type pci_bus_type;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+ if (dev->bus == &pci_bus_type)
+@@ -30,7 +30,7 @@ static inline struct dma_map_ops *get_dm
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+@@ -41,7 +41,7 @@ static inline void *dma_alloc_coherent(s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free_coherent(dev, size, cpu_addr, dma_handle);
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/elf_32.h linux-2.6.35.4/arch/sparc/include/asm/elf_32.h
+--- linux-2.6.35.4/arch/sparc/include/asm/elf_32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/elf_32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/elf_64.h linux-2.6.35.4/arch/sparc/include/asm/elf_64.h
+--- linux-2.6.35.4/arch/sparc/include/asm/elf_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/elf_64.h 2010-09-17 20:12:09.000000000 -0400
+@@ -162,6 +162,12 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h
+--- linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h
+--- linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h 2010-09-17 20:12:09.000000000 -0400
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h
+--- linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h 2010-09-17 20:12:09.000000000 -0400
+@@ -99,7 +99,12 @@ static void inline arch_read_lock(arch_r
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,7 +117,7 @@ static void inline arch_read_lock(arch_r
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+ static int inline arch_read_trylock(arch_rwlock_t *lock)
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -142,7 +152,12 @@ static void inline arch_read_unlock(arch
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h
+--- linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -249,14 +249,25 @@ extern unsigned long __copy_user(void __
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+@@ -272,19 +283,27 @@ static inline unsigned long copy_from_us
+ {
+ int sz = __compiletime_object_size(to);
+
++ if ((long)n < 0)
++ return n;
++
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
+ }
+
+- if (n && __access_ok((unsigned long) from, n))
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h
+--- linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h 2010-09-17 20:12:09.000000000 -0400
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/system.h>
+ #include <asm/spitfire.h>
+@@ -224,6 +225,12 @@ copy_from_user(void *to, const void __us
+ int sz = __compiletime_object_size(to);
+ unsigned long ret = size;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
+ if (likely(sz == -1 || sz >= size)) {
+ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+@@ -243,8 +250,15 @@ extern unsigned long copy_to_user_fixup(
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
++
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
+
++ ret = ___copy_to_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess.h linux-2.6.35.4/arch/sparc/include/asm/uaccess.h
+--- linux-2.6.35.4/arch/sparc/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
+@@ -1,5 +1,13 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#include <linux/types.h>
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++#endif
++#endif
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
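
The check_object_size() declaration added here is the hook for the PaX USERCOPY defence that the sparc copy routines above now call for non-constant sizes: before data crosses the user/kernel boundary, it verifies that the kernel pointer plus length stays within a single valid object (a whole slab object or the current stack frame). The shape of such a checker, heavily hedged (all three helpers named below are assumptions; the real implementation lives in the mm parts of this patch):

    void check_object_size(const void *ptr, unsigned long n, bool to_user)
    {
            if (!n)
                    return;
            if (object_fits_stack_frame(ptr, n))    /* assumed helper */
                    return;
            if (slab_object_contains(ptr, n))       /* assumed helper */
                    return;
            pax_report_usercopy(ptr, n, to_user);   /* assumed reporter */
    }
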
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/iommu.c linux-2.6.35.4/arch/sparc/kernel/iommu.c
+--- linux-2.6.35.4/arch/sparc/kernel/iommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/iommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -828,7 +828,7 @@ static void dma_4u_sync_sg_for_cpu(struc
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+-static struct dma_map_ops sun4u_dma_ops = {
++static const struct dma_map_ops sun4u_dma_ops = {
+ .alloc_coherent = dma_4u_alloc_coherent,
+ .free_coherent = dma_4u_free_coherent,
+ .map_page = dma_4u_map_page,
+@@ -839,7 +839,7 @@ static struct dma_map_ops sun4u_dma_ops
+ .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
+ };
+
+-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
++const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/ioport.c linux-2.6.35.4/arch/sparc/kernel/ioport.c
+--- linux-2.6.35.4/arch/sparc/kernel/ioport.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/ioport.c 2010-09-17 20:12:09.000000000 -0400
+@@ -397,7 +397,7 @@ static void sbus_sync_sg_for_device(stru
+ BUG();
+ }
+
+-struct dma_map_ops sbus_dma_ops = {
++const struct dma_map_ops sbus_dma_ops = {
+ .alloc_coherent = sbus_alloc_coherent,
+ .free_coherent = sbus_free_coherent,
+ .map_page = sbus_map_page,
+@@ -408,7 +408,7 @@ struct dma_map_ops sbus_dma_ops = {
+ .sync_sg_for_device = sbus_sync_sg_for_device,
+ };
+
+-struct dma_map_ops *dma_ops = &sbus_dma_ops;
++const struct dma_map_ops *dma_ops = &sbus_dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ static int __init sparc_register_ioport(void)
+@@ -645,7 +645,7 @@ static void pci32_sync_sg_for_device(str
+ }
+ }
+
+-struct dma_map_ops pci32_dma_ops = {
++const struct dma_map_ops pci32_dma_ops = {
+ .alloc_coherent = pci32_alloc_coherent,
+ .free_coherent = pci32_free_coherent,
+ .map_page = pci32_map_page,
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c
+--- linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
+ regs->npc = regs->pc + 4;
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: ta 0x7d */
+ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
+ };
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c
+--- linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
+ regs->tnpc = regs->tpc + 4;
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: ta 0x72 */
+ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
+ };
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/Makefile linux-2.6.35.4/arch/sparc/kernel/Makefile
+--- linux-2.6.35.4/arch/sparc/kernel/Makefile 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/Makefile 2010-09-17 20:12:09.000000000 -0400
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c
+--- linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c 2010-09-17 20:12:09.000000000 -0400
+@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+-static struct dma_map_ops sun4v_dma_ops = {
++static const struct dma_map_ops sun4v_dma_ops = {
+ .alloc_coherent = dma_4v_alloc_coherent,
+ .free_coherent = dma_4v_free_coherent,
+ .map_page = dma_4v_map_page,
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c
+--- linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c
+--- linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -174,14 +177,14 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_str
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff -urNp linux-2.6.35.4/arch/sparc/kernel/traps_64.c linux-2.6.35.4/arch/sparc/kernel/traps_64.c
+--- linux-2.6.35.4/arch/sparc/kernel/traps_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/kernel/traps_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+diff -urNp linux-2.6.35.4/arch/sparc/lib/atomic_64.S linux-2.6.35.4/arch/sparc/lib/atomic_64.S
+--- linux-2.6.35.4/arch/sparc/lib/atomic_64.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/lib/atomic_64.S 2010-09-17 20:12:37.000000000 -0400
+@@ -18,7 +18,12 @@
+ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
++ .globl atomic_add_unchecked
++ .type atomic_add_unchecked,#function
++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ add %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_unchecked, .-atomic_add_unchecked
++
+ .globl atomic_sub
+ .type atomic_sub,#function
+ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
++ .globl atomic_sub_unchecked
++ .type atomic_sub_unchecked,#function
++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ sub %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
++
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add_ret, .-atomic_add_ret
+
++ .globl atomic_add_ret_unchecked
++ .type atomic_add_ret_unchecked,#function
++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ addcc %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ add %g7, %o0, %g7
++ sra %g7, 0, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
++
+ .globl atomic_sub_ret
+ .type atomic_sub_ret,#function
+ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
+ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add, .-atomic64_add
+
++ .globl atomic64_add_unchecked
++ .type atomic64_add_unchecked,#function
++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_unchecked, .-atomic64_add_unchecked
++
+ .globl atomic64_sub
+ .type atomic64_sub,#function
+ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub, .-atomic64_sub
+
++ .globl atomic64_sub_unchecked
++ .type atomic64_sub_unchecked,#function
++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ subcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
++
+ .globl atomic64_add_ret
+ .type atomic64_add_ret,#function
+ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add_ret, .-atomic64_add_ret
+
++ .globl atomic64_add_ret_unchecked
++ .type atomic64_add_ret_unchecked,#function
++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ add %g7, %o0, %g7
++ mov %g7, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
++
+ .globl atomic64_sub_ret
+ .type atomic64_sub_ret,#function
+ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+diff -urNp linux-2.6.35.4/arch/sparc/lib/ksyms.c linux-2.6.35.4/arch/sparc/lib/ksyms.c
+--- linux-2.6.35.4/arch/sparc/lib/ksyms.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/lib/ksyms.c 2010-09-17 20:12:09.000000000 -0400
+@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
+
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
+ EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic64_sub);
++EXPORT_SYMBOL(atomic64_sub_unchecked);
+ EXPORT_SYMBOL(atomic64_sub_ret);
+
+ /* Atomic bit operations. */
+diff -urNp linux-2.6.35.4/arch/sparc/lib/rwsem_64.S linux-2.6.35.4/arch/sparc/lib/rwsem_64.S
+--- linux-2.6.35.4/arch/sparc/lib/rwsem_64.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/lib/rwsem_64.S 2010-09-17 20:12:09.000000000 -0400
+@@ -11,7 +11,12 @@
+ .globl __down_read
+ __down_read:
+ 1: lduw [%o0], %g1
+- add %g1, 1, %g7
++ addcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 1b
+@@ -33,7 +38,12 @@ __down_read:
+ .globl __down_read_trylock
+ __down_read_trylock:
+ 1: lduw [%o0], %g1
+- add %g1, 1, %g7
++ addcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cmp %g7, 0
+ bl,pn %icc, 2f
+ mov 0, %o1
+@@ -51,7 +61,12 @@ __down_write:
+ or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- add %g3, %g1, %g7
++ addcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -77,7 +92,12 @@ __down_write_trylock:
+ cmp %g3, 0
+ bne,pn %icc, 2f
+ mov 0, %o1
+- add %g3, %g1, %g7
++ addcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -90,7 +110,12 @@ __down_write_trylock:
+ __up_read:
+ 1:
+ lduw [%o0], %g1
+- sub %g1, 1, %g7
++ subcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 1b
+@@ -118,7 +143,12 @@ __up_write:
+ or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- sub %g3, %g1, %g7
++ subcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -143,7 +173,12 @@ __downgrade_write:
+ or %g1, %lo(RWSEM_WAITING_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- sub %g3, %g1, %g7
++ subcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+diff -urNp linux-2.6.35.4/arch/sparc/Makefile linux-2.6.35.4/arch/sparc/Makefile
+--- linux-2.6.35.4/arch/sparc/Makefile 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/Makefile 2010-09-17 20:12:37.000000000 -0400
+@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
+
+diff -urNp linux-2.6.35.4/arch/sparc/mm/fault_32.c linux-2.6.35.4/arch/sparc/mm/fault_32.c
+--- linux-2.6.35.4/arch/sparc/mm/fault_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/mm/fault_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -22,6 +22,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/page.h>
+@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -282,6 +547,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
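The branch-target arithmetic in the emulation above leans on one idiom throughout: to sign-extend a 22-bit (or, in the ba,pt variant, 19-bit) displacement, OR the high bits on, XOR the field's sign bit away, then add the sign bit back so the carry clears the high bits again for non-negative values. A stand-alone user-space check of the 22-bit case (illustrative only, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend the low 22 bits of a SPARC "ba" instruction word the
     * same way the emulation does; the branch target is then
     * pc + 4 + (disp << 2). */
    static uint32_t sext22(uint32_t insn)
    {
            return ((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U;
    }

    int main(void)
    {
            assert(sext22(0x00000001U) == 0x00000001U); /* +1: high bits clear */
            assert(sext22(0x003FFFFFU) == 0xFFFFFFFFU); /* -1: high bits set   */
            assert(sext22(0x00200000U) == 0xFFE00000U); /* most negative disp  */
            return 0;
    }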
+diff -urNp linux-2.6.35.4/arch/sparc/mm/fault_64.c linux-2.6.35.4/arch/sparc/mm/fault_64.c
+--- linux-2.6.35.4/arch/sparc/mm/fault_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/mm/fault_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -21,6 +21,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
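The CONFIG_PAX_DLRESOLVE path in both fault handlers above is a textbook double-checked publication protected by mmap_sem: probe mm->call_dl_resolve under the read lock, and only if it is still unset take the write lock, re-check, and publish, so two threads faulting at once never map the page twice. The same shape in user-space pthreads terms (names are illustrative):

    #include <pthread.h>

    static pthread_rwlock_t mm_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned long call_dl_resolve;   /* 0 until the first fault */

    static unsigned long get_or_create(unsigned long (*create)(void))
    {
            unsigned long val;

            pthread_rwlock_rdlock(&mm_lock);    /* fast path: read lock only */
            val = call_dl_resolve;
            pthread_rwlock_unlock(&mm_lock);
            if (val)
                    return val;

            pthread_rwlock_wrlock(&mm_lock);    /* slow path: re-check first */
            if (!call_dl_resolve)
                    call_dl_resolve = create();
            val = call_dl_resolve;
            pthread_rwlock_unlock(&mm_lock);
            return val;
    }

One wrinkle the sketch omits: the kernel code allocates the vma with kmem_cache_zalloc() before taking the write lock, since the allocation may sleep, and frees it again on the losing side of the race.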
+diff -urNp linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c
+--- linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400
+@@ -68,7 +68,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
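Every open-coded `!vma || addr + len <= vma->vm_start` test in these allocators becomes a call to check_heap_stack_gap(). The helper is defined elsewhere in this patch; read off the call sites alone, its minimal contract is the sketch below, and per its name the real version additionally keeps a guard gap between such allocations and a downward-growing stack vma rather than letting them sit flush against each other (hypothetical reconstruction, not the patch's code):

    /* Minimal contract implied by the call sites: the request
     * [addr, addr + len) must fit below the next mapping, if any. */
    static inline int check_heap_stack_gap(const struct vm_area_struct *vma,
                                           unsigned long addr, unsigned long len)
    {
            if (!vma)
                    return 1;       /* nothing above: the request fits */
            return addr + len <= vma->vm_start;
    }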
+diff -urNp linux-2.6.35.4/arch/sparc/mm/init_32.c linux-2.6.35.4/arch/sparc/mm/init_32.c
+--- linux-2.6.35.4/arch/sparc/mm/init_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/mm/init_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -318,6 +318,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -346,17 +349,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
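The switched indices are not arbitrary: protection_map[] is indexed with bit 3 = shared and bits 2..0 = PROT_EXEC/PROT_WRITE/PROT_READ, so entries 1-3 and 9-11 are exactly the private and shared read/write combinations without PROT_EXEC. Those are the ones redirected to the _NOEXEC protections (populated in the srmmu.c hunk below under CONFIG_PAX_PAGEEXEC), while the PROT_EXEC rows 4-7 and 12-15 keep executable encodings. Index formation, for illustration:

    #include <stdio.h>

    #define PROT_READ  0x1
    #define PROT_WRITE 0x2
    #define PROT_EXEC  0x4

    /* protection_map index = (shared ? 8 : 0) | prot bits. */
    static unsigned int prot_index(unsigned int prot, int shared)
    {
            return (prot & (PROT_READ | PROT_WRITE | PROT_EXEC)) |
                   (shared ? 8U : 0U);
    }

    int main(void)
    {
            printf("%u\n", prot_index(PROT_READ | PROT_WRITE, 0)); /* 3  */
            printf("%u\n", prot_index(PROT_READ | PROT_WRITE, 1)); /* 11 */
            return 0;
    }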
+diff -urNp linux-2.6.35.4/arch/sparc/mm/Makefile linux-2.6.35.4/arch/sparc/mm/Makefile
+--- linux-2.6.35.4/arch/sparc/mm/Makefile 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/mm/Makefile 2010-09-17 20:12:09.000000000 -0400
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+ obj-y += fault_$(BITS).o
+diff -urNp linux-2.6.35.4/arch/sparc/mm/srmmu.c linux-2.6.35.4/arch/sparc/mm/srmmu.c
+--- linux-2.6.35.4/arch/sparc/mm/srmmu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/sparc/mm/srmmu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -2198,6 +2198,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff -urNp linux-2.6.35.4/arch/um/include/asm/kmap_types.h linux-2.6.35.4/arch/um/include/asm/kmap_types.h
+--- linux-2.6.35.4/arch/um/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/um/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff -urNp linux-2.6.35.4/arch/um/include/asm/page.h linux-2.6.35.4/arch/um/include/asm/page.h
+--- linux-2.6.35.4/arch/um/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/um/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff -urNp linux-2.6.35.4/arch/um/sys-i386/syscalls.c linux-2.6.35.4/arch/um/sys-i386/syscalls.c
+--- linux-2.6.35.4/arch/um/sys-i386/syscalls.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/um/sys-i386/syscalls.c 2010-09-17 20:12:09.000000000 -0400
+@@ -11,6 +11,21 @@
+ #include "asm/uaccess.h"
+ #include "asm/unistd.h"
+
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * The prototype on i386 is:
+ *
+diff -urNp linux-2.6.35.4/arch/x86/boot/bitops.h linux-2.6.35.4/arch/x86/boot/bitops.h
+--- linux-2.6.35.4/arch/x86/boot/bitops.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/bitops.h 2010-09-17 20:12:09.000000000 -0400
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
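This and the following boot files all get the same treatment: an asm statement that has output operands but no volatile qualifier is assumed by GCC to be a pure function of its inputs, so two identical statements can be merged by common-subexpression elimination and one whose result looks unused can be deleted outright. That is wrong for code reading state the compiler cannot see (BIOS-modified memory, CR0, MSRs). A user-space demonstration of the merge hazard, with rdtsc standing in for such state:

    #include <stdio.h>

    static unsigned long long tsc_plain(void)
    {
            unsigned int lo, hi;
            asm("rdtsc" : "=a" (lo), "=d" (hi));          /* may be CSE'd   */
            return ((unsigned long long)hi << 32) | lo;
    }

    static unsigned long long tsc_volatile(void)
    {
            unsigned int lo, hi;
            asm volatile("rdtsc" : "=a" (lo), "=d" (hi)); /* always re-runs */
            return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
            /* At -O2 the plain pair may collapse into a single rdtsc and
             * print the same value twice; the volatile pair will not. */
            printf("plain:    %llu %llu\n", tsc_plain(), tsc_plain());
            printf("volatile: %llu %llu\n", tsc_volatile(), tsc_volatile());
            return 0;
    }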
+diff -urNp linux-2.6.35.4/arch/x86/boot/boot.h linux-2.6.35.4/arch/x86/boot/boot.h
+--- linux-2.6.35.4/arch/x86/boot/boot.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/boot.h 2010-09-17 20:12:09.000000000 -0400
+@@ -82,7 +82,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
+diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/head_32.S linux-2.6.35.4/arch/x86/boot/compressed/head_32.S
+--- linux-2.6.35.4/arch/x86/boot/compressed/head_32.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/compressed/head_32.S 2010-09-17 20:12:09.000000000 -0400
+@@ -76,7 +76,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -149,7 +149,7 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+@@ -157,8 +157,7 @@ relocated:
+
+ 1: subl $4, %edi
+ movl (%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/head_64.S linux-2.6.35.4/arch/x86/boot/compressed/head_64.S
+--- linux-2.6.35.4/arch/x86/boot/compressed/head_64.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/compressed/head_64.S 2010-09-17 20:12:09.000000000 -0400
+@@ -91,7 +91,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -233,7 +233,7 @@ ENTRY(startup_64)
+ notq %rax
+ andq %rax, %rbp
+ #else
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+
+ /* Target address to relocate to for decompression */
+diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/misc.c linux-2.6.35.4/arch/x86/boot/compressed/misc.c
+--- linux-2.6.35.4/arch/x86/boot/compressed/misc.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/compressed/misc.c 2010-09-17 20:12:09.000000000 -0400
+@@ -285,7 +285,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -332,7 +332,7 @@ asmlinkage void decompress_kernel(void *
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c
+--- linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c 2010-09-17 20:12:09.000000000 -0400
+@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
+
+ offs = (olen > ilen) ? olen - ilen : 0;
+ offs += olen >> 12; /* Add 8 bytes for each 32K block */
+- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
++ offs += 64*1024; /* Add 64K bytes slack */
+ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
+
+ printf(".section \".rodata..compressed\",\"a\",@progbits\n");
+diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/relocs.c linux-2.6.35.4/arch/x86/boot/compressed/relocs.c
+--- linux-2.6.35.4/arch/x86/boot/compressed/relocs.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/compressed/relocs.c 2010-09-17 20:12:09.000000000 -0400
+@@ -13,8 +13,11 @@
+
+ static void die(char *fmt, ...);
+
++#include "../../../../include/generated/autoconf.h"
++
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+
+@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++ for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++ for (j = 0; j < ehdr.e_phnum; j++) {
++ if (phdr[j].p_type != PT_LOAD )
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ struct section *sec_applies, *sec_symtab;
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+ if (sec->shdr.sh_type != SHT_REL) {
+ continue;
+ }
+@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
+
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+- int j;
++ unsigned int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
+ !is_rel_reloc(sym_name(sym_strtab, sym))) {
+ continue;
+ }
++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++ continue;
++#endif
++
+ switch (r_type) {
+ case R_386_NONE:
+ case R_386_PC32:
+@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
+
+ static void emit_relocs(int as_text)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc);
+@@ -665,6 +725,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
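The new read_phdrs()/base logic rebases every relocation offset from its link-time virtual address into the kernel's runtime linear view: within a PT_LOAD segment, a byte's runtime address is CONFIG_PAGE_OFFSET plus its physical address, so the per-segment delta added to each r_offset is the expression computed into `base` above. In isolation:

    #include <stdint.h>
    #include <elf.h>

    /* Delta mapping a link-time r_offset into the kernel's runtime
     * linear address space, one value per PT_LOAD segment. */
    static uint32_t reloc_base(uint32_t page_offset, const Elf32_Phdr *ph)
    {
            return page_offset + ph->p_paddr - ph->p_vaddr;
    }

With CONFIG_PAX_KERNEXEC on i386 the text segment is linked at virtual addresses that no longer equal CONFIG_PAGE_OFFSET + physical, which is presumably why the vanilla assumption of an identity delta stops holding here.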
+diff -urNp linux-2.6.35.4/arch/x86/boot/cpucheck.c linux-2.6.35.4/arch/x86/boot/cpucheck.c
+--- linux-2.6.35.4/arch/x86/boot/cpucheck.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/cpucheck.c 2010-09-17 20:12:09.000000000 -0400
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
+diff -urNp linux-2.6.35.4/arch/x86/boot/header.S linux-2.6.35.4/arch/x86/boot/header.S
+--- linux-2.6.35.4/arch/x86/boot/header.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/header.S 2010-09-17 20:12:09.000000000 -0400
+@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
+ #define VO_INIT_SIZE (VO__end - VO__text)
+diff -urNp linux-2.6.35.4/arch/x86/boot/memory.c linux-2.6.35.4/arch/x86/boot/memory.c
+--- linux-2.6.35.4/arch/x86/boot/memory.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/memory.c 2010-09-17 20:12:09.000000000 -0400
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff -urNp linux-2.6.35.4/arch/x86/boot/video.c linux-2.6.35.4/arch/x86/boot/video.c
+--- linux-2.6.35.4/arch/x86/boot/video.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/video.c 2010-09-17 20:12:09.000000000 -0400
+@@ -96,7 +96,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
+diff -urNp linux-2.6.35.4/arch/x86/boot/video-vesa.c linux-2.6.35.4/arch/x86/boot/video-vesa.c
+--- linux-2.6.35.4/arch/x86/boot/video-vesa.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/boot/video-vesa.c 2010-09-17 20:12:09.000000000 -0400
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff -urNp linux-2.6.35.4/arch/x86/ia32/ia32entry.S linux-2.6.35.4/arch/x86/ia32/ia32entry.S
+--- linux-2.6.35.4/arch/x86/ia32/ia32entry.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/ia32/ia32entry.S 2010-09-17 20:12:37.000000000 -0400
+@@ -13,6 +13,7 @@
+ #include <asm/thread_info.h>
+ #include <asm/segment.h>
+ #include <asm/irqflags.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+@@ -50,7 +51,12 @@
+ /*
+ * Reload arg registers from stack in case ptrace changed them.
+ * We don't reload %eax because syscall_trace_enter() returned
+- * the value it wants us to use in the table lookup.
++ * the %rax value we should see. Instead, we just truncate that
++ * value to 32 bits again as we did on entry from user mode.
++ * If it's a new value set by user_regset during entry tracing,
++ * this matches the normal truncation of the user-mode value.
++ * If it's -1 to make us punt the syscall, then (u32)-1 is still
++ * an appropriately invalid value.
+ */
+ .macro LOAD_ARGS32 offset, _r9=0
+ .if \_r9
+@@ -60,6 +66,7 @@
+ movl \offset+48(%rsp),%edx
+ movl \offset+56(%rsp),%esi
+ movl \offset+64(%rsp),%edi
++ movl %eax,%eax /* zero extension */
+ .endm
+
+ .macro CFI_STARTPROC32 simple
+@@ -114,6 +121,11 @@ ENTRY(ia32_sysenter_target)
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(kernel_stack), %rsp
+ addq $(KERNEL_STACK_OFFSET),%rsp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs, here we enable it straight after entry:
+@@ -144,6 +156,12 @@ ENTRY(ia32_sysenter_target)
+ SAVE_ARGS 0,0,1
+ /* no need to do an access_ok check here because rbp has been
+ 32bit zero extended */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r10
++ add %r10,%rbp
++#endif
++
+ 1: movl (%rbp),%ebp
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+@@ -153,7 +171,7 @@ ENTRY(ia32_sysenter_target)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ CFI_REMEMBER_STATE
+ jnz sysenter_tracesys
+- cmpl $(IA32_NR_syscalls-1),%eax
++ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+ sysenter_do_call:
+ IA32_ARG_FIXUP
+@@ -166,6 +184,11 @@ sysenter_dispatch:
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
+ jnz sysexit_audit
+ sysexit_from_sys_call:
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++
+ andl $~TS_COMPAT,TI_status(%r10)
+ /* clear IF, that popfq doesn't enable interrupts early */
+ andl $~0x200,EFLAGS-R11(%rsp)
+@@ -195,7 +218,7 @@ sysexit_from_sys_call:
+ movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+- cmpl $(IA32_NR_syscalls-1),%eax
++ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+ movl %ebx,%edi /* reload 1st syscall arg */
+ movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
+@@ -248,7 +271,7 @@ sysenter_tracesys:
+ call syscall_trace_enter
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+- cmpl $(IA32_NR_syscalls-1),%eax
++ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+ jmp sysenter_do_call
+ CFI_ENDPROC
+@@ -284,6 +307,11 @@ ENTRY(ia32_cstar_target)
+ movl %esp,%r8d
+ CFI_REGISTER rsp,r8
+ movq PER_CPU_VAR(kernel_stack),%rsp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+@@ -305,6 +333,12 @@ ENTRY(ia32_cstar_target)
+ /* no need to do an access_ok check here because r8 has been
+ 32bit zero extended */
+ /* hardware stack frame is complete now */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r10
++ add %r10,%r8
++#endif
++
+ 1: movl (%r8),%r9d
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+@@ -314,7 +348,7 @@ ENTRY(ia32_cstar_target)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ CFI_REMEMBER_STATE
+ jnz cstar_tracesys
+- cmpl $IA32_NR_syscalls-1,%eax
++ cmpq $IA32_NR_syscalls-1,%rax
+ ja ia32_badsys
+ cstar_do_call:
+ IA32_ARG_FIXUP 1
+@@ -327,6 +361,11 @@ cstar_dispatch:
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
+ jnz sysretl_audit
+ sysretl_from_sys_call:
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++
+ andl $~TS_COMPAT,TI_status(%r10)
+ RESTORE_ARGS 1,-ARG_SKIP,1,1,1
+ movl RIP-ARGOFFSET(%rsp),%ecx
+@@ -367,7 +406,7 @@ cstar_tracesys:
+ LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+ xchgl %ebp,%r9d
+- cmpl $(IA32_NR_syscalls-1),%eax
++ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
+ jmp cstar_do_call
+ END(ia32_cstar_target)
+@@ -409,6 +448,11 @@ ENTRY(ia32_syscall)
+ CFI_REL_OFFSET rip,RIP-RIP
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+@@ -425,7 +469,7 @@ ENTRY(ia32_syscall)
+ orl $TS_COMPAT,TI_status(%r10)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
+ jnz ia32_tracesys
+- cmpl $(IA32_NR_syscalls-1),%eax
++ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+ ia32_do_call:
+ IA32_ARG_FIXUP
+@@ -444,7 +488,7 @@ ia32_tracesys:
+ call syscall_trace_enter
+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
+ RESTORE_REST
+- cmpl $(IA32_NR_syscalls-1),%eax
++ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
+ jmp ia32_do_call
+ END(ia32_syscall)
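The recurring cmpl-to-cmpq change works together with the `movl %eax,%eax` zero-extension added to LOAD_ARGS32: the syscall number is bounds-checked and then used as a table index through the full %rax, so the check has to see the same 64 bits the indexing does. With a 32-bit compare alone, a ptrace-written %rax with nonzero upper bits could pass the check yet index far outside the syscall table. The arithmetic, with made-up numbers:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t rax = 0x100000001ULL; /* low half valid, upper half not */
            uint64_t nr  = 337;            /* illustrative table size        */

            assert((uint32_t)rax <= nr - 1); /* cmpl-style check: passes */
            assert(!(rax <= nr - 1));        /* cmpq-style check: fails  */
            return 0;
    }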
+diff -urNp linux-2.6.35.4/arch/x86/ia32/ia32_signal.c linux-2.6.35.4/arch/x86/ia32/ia32_signal.c
+--- linux-2.6.35.4/arch/x86/ia32/ia32_signal.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/ia32/ia32_signal.c 2010-09-17 20:12:09.000000000 -0400
+@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
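Both the old and the patched expression put sp on the i386 ABI residue, sp ≡ 12 (mod 16), so that (sp + 4) is 16-byte aligned at function entry; the patched form additionally guarantees the pointer moves strictly downward instead of possibly staying put when sp already sat on the right residue. A quick exhaustive check over one 16-byte period:

    #include <assert.h>

    int main(void)
    {
            for (unsigned long sp = 4096; sp < 4112; sp++) {
                    unsigned long oldsp = ((sp + 4) & -16UL) - 4;
                    unsigned long newsp = ((sp - 12) & -16UL) - 4;

                    assert((oldsp + 4) % 16 == 0); /* both satisfy the ABI */
                    assert((newsp + 4) % 16 == 0);
                    assert(newsp < sp); /* oldsp == sp when sp % 16 == 12 */
            }
            return 0;
    }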
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/alternative.h linux-2.6.35.4/arch/x86/include/asm/alternative.h
+--- linux-2.6.35.4/arch/x86/include/asm/alternative.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/alternative.h 2010-09-17 20:12:09.000000000 -0400
+@@ -91,7 +91,7 @@ static inline int alternatives_text_rese
+ " .byte 664f-663f\n" /* replacementlen */ \
+ " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement, \"ax\"\n" \
++ ".section .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous"
+
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/apm.h linux-2.6.35.4/arch/x86/include/asm/apm.h
+--- linux-2.6.35.4/arch/x86/include/asm/apm.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/apm.h 2010-09-17 20:12:09.000000000 -0400
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/asm.h linux-2.6.35.4/arch/x86/include/asm/asm.h
+--- linux-2.6.35.4/arch/x86/include/asm/asm.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/asm.h 2010-09-17 20:12:09.000000000 -0400
+@@ -37,6 +37,12 @@
+ #define _ASM_SI __ASM_REG(si)
+ #define _ASM_DI __ASM_REG(di)
+
++#ifdef CONFIG_X86_32
++#define _ASM_INTO "into"
++#else
++#define _ASM_INTO "int $4"
++#endif
++
+ /* Exception table entry */
+ #ifdef __ASSEMBLY__
+ # define _ASM_EXTABLE(from,to) \
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h
+--- linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -12,6 +12,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val) { (val) }
+
+ #ifdef CONFIG_X86_CMPXCHG64
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h
+--- linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h 2010-09-17 20:12:09.000000000 -0400
+@@ -22,6 +22,18 @@ static inline long atomic64_read(const a
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
++
++/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: required value
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -56,7 +102,15 @@ static inline void atomic64_add(long i,
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -74,7 +128,16 @@ static inline int atomic64_sub_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -88,6 +151,31 @@ static inline int atomic64_sub_and_test(
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -101,7 +189,32 @@ static inline void atomic64_inc(atomic64
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -118,7 +231,20 @@ static inline int atomic64_dec_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "incq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -136,7 +262,20 @@ static inline int atomic64_inc_and_test(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ ".pushsection .fixup,\"ax\"\n"
++ "1: \n"
++ LOCK_PREFIX "decq %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -155,7 +294,16 @@ static inline int atomic64_add_negative(
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -171,7 +319,31 @@ static inline int atomic64_add_negative(
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
+ long __i = i;
+- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movq %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++ long __i = i;
++ asm volatile(LOCK_PREFIX "xaddq %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -183,6 +355,10 @@ static inline long atomic64_sub_return(l
+ }
+
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -206,17 +382,29 @@ static inline long atomic64_xchg(atomic6
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("add %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
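Every PAX_REFCOUNT instrumentation in this file follows one template: perform the locked operation, `jno` past the fixup if the signed result did not overflow, otherwise undo the operation and raise `int $4` (the overflow exception), which PaX's trap handler turns into a refcount-overflow report against the offending task. The same checked-add idea expressed portably, ignoring atomicity and the undo step (a sketch using a GCC/clang builtin, not the kernel mechanism):

    #include <stdio.h>
    #include <stdlib.h>

    /* Refuse to let a signed counter wrap on addition. */
    static void refcount_add_checked(long i, long *v)
    {
            long sum;

            if (__builtin_saddl_overflow(*v, i, &sum)) {
                    fprintf(stderr, "refcount overflow\n");
                    abort();    /* kernel: int $4 -> report, kill task */
            }
            *v = sum;
    }

    int main(void)
    {
            long c = 0;
            refcount_add_checked(1, &c);
            printf("%ld\n", c); /* 1 */
            return 0;
    }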
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic.h linux-2.6.35.4/arch/x86/include/asm/atomic.h
+--- linux-2.6.35.4/arch/x86/include/asm/atomic.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/atomic.h 2010-09-17 20:12:09.000000000 -0400
+@@ -26,6 +26,17 @@ static inline int atomic_read(const atom
+ }
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -138,7 +263,16 @@ static inline int atomic_inc_and_test(at
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -157,7 +291,16 @@ static inline int atomic_add_negative(in
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -180,6 +323,46 @@ static inline int atomic_add_return(int
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ local_irq_save(flags);
++ __i = atomic_read(v);
++ atomic_set(v, i + __i);
++ local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
++/**
++ * atomic_add_return_unchecked - add integer and return
++ * @v: pointer of type atomic_unchecked_t
++ * @i: integer value to add
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ int __i;
++#ifdef CONFIG_M386
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ __i = i;
+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+@@ -208,6 +391,10 @@ static inline int atomic_sub_return(int
+ }
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+@@ -231,17 +418,29 @@ static inline int atomic_xchg(atomic_t *
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INTO "\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
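[Note] The shape repeated throughout this atomic.h hunk (and mirrored for local_t in the local.h hunk further down) is: perform the locked operation, then jno over an undo-plus-trap sequence, so a signed overflow is reverted and reported via the overflow exception while the common path costs one untaken branch. A minimal userspace sketch of the same idea — assumptions: x86 GCC inline asm, with abort() standing in for the kernel's _ASM_INTO/_ASM_EXTABLE fixup and report path:

#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;

/* Checked add in the PAX_REFCOUNT style: locked add, record OF,
 * revert the add if it overflowed.  The kernel raises #OF instead
 * and recovers through the exception table. */
static void atomic_add_checked(int i, atomic_t *v)
{
	unsigned char overflowed;

	asm volatile("lock addl %2,%0\n\t"
		     "seto %1\n\t"
		     "jno 0f\n\t"
		     "lock subl %2,%0\n"	/* revert on overflow */
		     "0:"
		     : "+m" (v->counter), "=qm" (overflowed)
		     : "ir" (i));
	if (overflowed)
		abort();	/* kernel: report + kill instead */
}

int main(void)
{
	atomic_t a = { 0x7fffffff };

	atomic_add_checked(1, &a);	/* overflows: reverted, then abort */
	printf("%d\n", a.counter);	/* not reached */
	return 0;
}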
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/boot.h linux-2.6.35.4/arch/x86/include/asm/boot.h
+--- linux-2.6.35.4/arch/x86/include/asm/boot.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/boot.h 2010-09-17 20:12:09.000000000 -0400
+@@ -11,10 +11,15 @@
+ #include <asm/pgtable_types.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/cacheflush.h linux-2.6.35.4/arch/x86/include/asm/cacheflush.h
+--- linux-2.6.35.4/arch/x86/include/asm/cacheflush.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/cacheflush.h 2010-09-17 20:12:09.000000000 -0400
+@@ -66,7 +66,7 @@ static inline unsigned long get_page_mem
+ unsigned long pg_flags = pg->flags & _PGMT_MASK;
+
+ if (pg_flags == _PGMT_DEFAULT)
+- return -1;
++ return ~0UL;
+ else if (pg_flags == _PGMT_WC)
+ return _PAGE_CACHE_WC;
+ else if (pg_flags == _PGMT_UC_MINUS)
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/cache.h linux-2.6.35.4/arch/x86/include/asm/cache.h
+--- linux-2.6.35.4/arch/x86/include/asm/cache.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/cache.h 2010-09-17 20:12:09.000000000 -0400
+@@ -8,6 +8,7 @@
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+ #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
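[Note] The new __read_only attribute pairs with the pax_open_kernel()/pax_close_kernel() machinery introduced in the pgtable.h hunk below. A hedged usage sketch (kernel context assumed; the .data..read_only output section is collected by the hardened linker script, which this hunk does not show):

/* Written once during init, mapped read-only afterwards by KERNEXEC. */
static unsigned long example_feature_mask __read_only;

static int __init example_init(void)
{
	pax_open_kernel();
	example_feature_mask = 0x3UL;	/* one-time write */
	pax_close_kernel();
	return 0;
}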
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/checksum_32.h linux-2.6.35.4/arch/x86/include/asm/checksum_32.h
+--- linux-2.6.35.4/arch/x86/include/asm/checksum_32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/checksum_32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/compat.h linux-2.6.35.4/arch/x86/include/asm/compat.h
+--- linux-2.6.35.4/arch/x86/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compa
+ return (u32)(unsigned long)uptr;
+ }
+
+-static inline void __user *compat_alloc_user_space(long len)
++static inline void __user *arch_compat_alloc_user_space(long len)
+ {
+ struct pt_regs *regs = task_pt_regs(current);
+ return (void __user *)regs->sp - len;
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/cpufeature.h linux-2.6.35.4/arch/x86/include/asm/cpufeature.h
+--- linux-2.6.35.4/arch/x86/include/asm/cpufeature.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/cpufeature.h 2010-09-17 20:12:09.000000000 -0400
+@@ -323,7 +323,7 @@ static __always_inline __pure bool __sta
+ " .byte 4f - 3f\n" /* replacement len */
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/desc.h linux-2.6.35.4/arch/x86/include/asm/desc.h
+--- linux-2.6.35.4/arch/x86/include/asm/desc.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/desc.h 2010-09-17 20:12:09.000000000 -0400
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+ #include <linux/smp.h>
+
+ static inline void fill_ldt(struct desc_struct *desc,
+@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(str
+ static inline void native_write_idt_entry(gate_desc *idt, int entry,
+ const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
+ const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
+ const void *desc, int type)
+ {
+ unsigned int size;
++
+ switch (type) {
+ case DESC_TSS:
+ size = sizeof(tss_desc);
+@@ -139,7 +142,10 @@ static inline void native_write_gdt_entr
+ size = sizeof(struct desc_struct);
+ break;
+ }
++
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -211,7 +217,9 @@ static inline void native_set_ldt(const
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -246,8 +254,10 @@ static inline void native_load_tls(struc
+ unsigned int i;
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ #define _LDT_empty(info) \
+@@ -309,7 +319,7 @@ static inline void set_desc_limit(struct
+ desc->limit = (limit >> 16) & 0xf;
+ }
+
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -327,7 +337,7 @@ static inline void _set_gate(int gate, u
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+-static inline void set_intr_gate(unsigned int n, void *addr)
++static inline void set_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
+@@ -356,19 +366,19 @@ static inline void alloc_intr_gate(unsig
+ /*
+ * This routine sets up an interrupt gate at directory privilege level 3.
+ */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -377,19 +387,31 @@ static inline void set_trap_gate(unsigne
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
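[Note] Decoding the constants in set_user_cs() above: type 0xFB and flags 0xC pack a present, DPL-3, 32-bit, read/execute (accessed) code segment with 4 KiB granularity — the per-mm user %cs whose base and limit PAGEEXEC/SEGMEXEC shrink; the switch_mm() changes in the mmu_context.h hunk below are the main caller.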
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/device.h linux-2.6.35.4/arch/x86/include/asm/device.h
+--- linux-2.6.35.4/arch/x86/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400
+@@ -6,7 +6,7 @@ struct dev_archdata {
+ void *acpi_handle;
+ #endif
+ #ifdef CONFIG_X86_64
+-struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ #endif
+ #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
+ void *iommu; /* hook for IOMMU specific extension */
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -26,9 +26,9 @@ extern int iommu_merge;
+ extern struct device x86_dma_fallback_dev;
+ extern int panic_on_overflow;
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ #ifdef CONFIG_X86_32
+ return dma_ops;
+@@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dm
+ /* Make sure we keep the same behaviour */
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+
+@@ -123,7 +123,7 @@ static inline void *
+ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+@@ -150,7 +150,7 @@ dma_alloc_coherent(struct device *dev, s
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ WARN_ON(irqs_disabled()); /* for portability */
+
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/e820.h linux-2.6.35.4/arch/x86/include/asm/e820.h
+--- linux-2.6.35.4/arch/x86/include/asm/e820.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/e820.h 2010-09-17 20:12:09.000000000 -0400
+@@ -69,7 +69,7 @@ struct e820map {
+ #define ISA_START_ADDRESS 0xa0000
+ #define ISA_END_ADDRESS 0x100000
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #ifdef __KERNEL__
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/elf.h linux-2.6.35.4/arch/x86/include/asm/elf.h
+--- linux-2.6.35.4/arch/x86/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
+@@ -237,7 +237,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -291,8 +309,7 @@ do { \
+ #define ARCH_DLINFO \
+ do { \
+ if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -303,7 +320,7 @@ do { \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_X86_ELF_H */
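[Note] For scale, plugging assumed stock values into the macros above (TASK_SIZE_MAX_SHIFT = 47, PAGE_SHIFT = 12): PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN grant native 64-bit tasks 47 - 12 - 3 = 32 bits of randomization, IA-32 compatibility tasks 16 bits, and 32-bit SEGMEXEC tasks 15 bits, applied on top of the lowered PAX_ELF_ET_DYN_BASE.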
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/futex.h linux-2.6.35.4/arch/x86/include/asm/futex.h
+--- linux-2.6.35.4/arch/x86/include/asm/futex.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/futex.h 2010-09-17 20:12:09.000000000 -0400
+@@ -11,17 +11,54 @@
+ #include <asm/processor.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_X86_32
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ asm volatile( \
++ "movw\t%w6, %%ds\n" \
++ "1:\t" insn "\n" \
++ "2:\tpushl\t%%ss\n" \
++ "\tpopl\t%%ds\n" \
++ "\t.section .fixup,\"ax\"\n" \
++ "3:\tmov\t%3, %1\n" \
++ "\tjmp\t2b\n" \
++ "\t.previous\n" \
++ _ASM_EXTABLE(1b, 3b) \
++ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
++
++#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ asm volatile("movw\t%w7, %%es\n" \
++ "1:\tmovl\t%%es:%2, %0\n" \
++ "\tmovl\t%0, %3\n" \
++ "\t" insn "\n" \
++ "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\
++ "\tjnz\t1b\n" \
++ "3:\tpushl\t%%ss\n" \
++ "\tpopl\t%%es\n" \
++ "\t.section .fixup,\"ax\"\n" \
++ "4:\tmov\t%5, %1\n" \
++ "\tjmp\t3b\n" \
++ "\t.previous\n" \
++ _ASM_EXTABLE(1b, 4b) \
++ _ASM_EXTABLE(2b, 4b) \
++ : "=&a" (oldval), "=&r" (ret), \
++ "+m" (*uaddr), "=&r" (tem) \
++ : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
++#else
++#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 *, uaddr); \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "=r" (oldval), "=r" (ret), \
++ "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))\
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
+
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 *, uaddr); \
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+@@ -34,10 +71,12 @@
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+- "+m" (*uaddr), "=&r" (tem) \
++ "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)),\
++ "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
++#endif
+
+-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
++static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+ {
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+@@ -61,11 +100,20 @@ static inline int futex_atomic_op_inuser
+
+ switch (op) {
+ case FUTEX_OP_SET:
++#ifdef CONFIG_X86_32
++ __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
++#else
+ __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++#endif
+ break;
+ case FUTEX_OP_ADD:
++#ifdef CONFIG_X86_32
++ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval,
++ uaddr, oparg);
++#else
+ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
++#endif
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+@@ -109,7 +157,7 @@ static inline int futex_atomic_op_inuser
+ return ret;
+ }
+
+-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
++static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
+ int newval)
+ {
+
+@@ -119,17 +167,31 @@ static inline int futex_atomic_cmpxchg_i
+ return -ENOSYS;
+ #endif
+
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
++ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
+- "2:\t.section .fixup, \"ax\"\n"
++ asm volatile(
++#ifdef CONFIG_X86_32
++ "\tmovw %w5, %%ds\n"
++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %%ds:%1\n"
++ "2:\tpushl %%ss\n"
++ "\tpopl %%ds\n"
++#else
++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
++ "2:\n"
++#endif
++ "\t.section .fixup, \"ax\"\n"
+ "3:\tmov %2, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
++#ifdef CONFIG_X86_32
+ : "=a" (oldval), "+m" (*uaddr)
++ : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
++#else
++ : "=a" (oldval), "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
++#endif
+ : "memory"
+ );
+
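[Note] Two UDEREF strategies are visible above: the 32-bit arms temporarily load %ds with __USER_DS so the user segment's limit bounds the access (then restore %ds from %ss, which holds the kernel data selector in kernel mode), while the 64-bit arms rebase the access into the user shadow area — the offset is PAX_USER_SHADOW_BASE / 4 because it is added to a u32 pointer, whose arithmetic scales by 4.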
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/i387.h linux-2.6.35.4/arch/x86/include/asm/i387.h
+--- linux-2.6.35.4/arch/x86/include/asm/i387.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/i387.h 2010-09-17 20:12:09.000000000 -0400
+@@ -77,6 +77,11 @@ static inline int fxrstor_checking(struc
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ asm volatile("1: rex64/fxrstor (%[fx])\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+@@ -127,6 +132,11 @@ static inline int fxsave_user(struct i38
+ {
+ int err;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
++ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
++#endif
++
+ asm volatile("1: rex64/fxsave (%[fx])\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+@@ -220,13 +230,8 @@ static inline int fxrstor_checking(struc
+ }
+
+ /* We need a safe address that is cheap to find and that is already
+- in L1 during context switch. The best choices are unfortunately
+- different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
++ in L1 during context switch. */
++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
+
+ /*
+ * These must be called with preempt disabled
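[Note] The fxsave/fxrstor hunks above apply the same amd64 UDEREF rule: user memory is shadow-mapped at PAX_USER_SHADOW_BASE, so a raw user pointer below the base is rebased before the kernel touches it. A hedged distillation of that fix-up (helper name is ours, not the patch's):

#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
/* Rebase a raw user pointer into the UDEREF shadow area. */
static inline void __user *pax_shadow_user_ptr(void __user *uptr)
{
	if ((unsigned long)uptr < PAX_USER_SHADOW_BASE)
		uptr = (void __user *)((unsigned long)uptr + PAX_USER_SHADOW_BASE);
	return uptr;
}
#endif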
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/io.h linux-2.6.35.4/arch/x86/include/asm/io.h
+--- linux-2.6.35.4/arch/x86/include/asm/io.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/io.h 2010-09-17 20:12:09.000000000 -0400
+@@ -213,6 +213,17 @@ extern void iounmap(volatile void __iome
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ /*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
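[Note] Unpacking the new checks: with an assumed boot_cpu_data.x86_phys_bits of 36, 1 << (36 - 12) page frames are addressable, so valid_phys_addr_range() rounds the requested end up to a page boundary and rejects any /dev/mem-style access whose final frame reaches frame 2^24 — that is, anything past the CPU's physical address space.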
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/iommu.h linux-2.6.35.4/arch/x86/include/asm/iommu.h
+--- linux-2.6.35.4/arch/x86/include/asm/iommu.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/iommu.h 2010-09-17 20:12:09.000000000 -0400
+@@ -1,7 +1,7 @@
+ #ifndef _ASM_X86_IOMMU_H
+ #define _ASM_X86_IOMMU_H
+
+-extern struct dma_map_ops nommu_dma_ops;
++extern const struct dma_map_ops nommu_dma_ops;
+ extern int force_iommu, no_iommu;
+ extern int iommu_detected;
+ extern int iommu_pass_through;
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/irqflags.h linux-2.6.35.4/arch/x86/include/asm/irqflags.h
+--- linux-2.6.35.4/arch/x86/include/asm/irqflags.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/irqflags.h 2010-09-17 20:12:09.000000000 -0400
+@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
+ sti; \
+ sysexit
+
++#define GET_CR0_INTO_RDI mov %cr0, %rdi
++#define SET_RDI_INTO_CR0 mov %rdi, %cr0
++#define GET_CR3_INTO_RDI mov %cr3, %rdi
++#define SET_RDI_INTO_CR3 mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/kvm_host.h linux-2.6.35.4/arch/x86/include/asm/kvm_host.h
+--- linux-2.6.35.4/arch/x86/include/asm/kvm_host.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/kvm_host.h 2010-09-17 20:12:09.000000000 -0400
+@@ -536,7 +536,7 @@ struct kvm_x86_ops {
+ const struct trace_print_flags *exit_reasons_str;
+ };
+
+-extern struct kvm_x86_ops *kvm_x86_ops;
++extern const struct kvm_x86_ops *kvm_x86_ops;
+
+ int kvm_mmu_module_init(void);
+ void kvm_mmu_module_exit(void);
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/local.h linux-2.6.35.4/arch/x86/include/asm/local.h
+--- linux-2.6.35.4/arch/x86/include/asm/local.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/local.h 2010-09-17 20:12:09.000000000 -0400
+@@ -18,26 +18,90 @@ typedef struct {
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_DEC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_INC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_SUB "%1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_ADD "%1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -55,7 +119,24 @@ static inline int local_sub_and_test(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_ADD "%2,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -73,7 +154,24 @@ static inline int local_dec_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_INC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -91,7 +189,24 @@ static inline int local_inc_and_test(loc
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_DEC "%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -110,7 +225,24 @@ static inline int local_add_negative(lon
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_SUB "%2,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -133,7 +265,23 @@ static inline long local_add_return(long
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ _ASM_MOV "%0,%1\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
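[Note] The local.h conversions above are the local_t twin of the atomic.h pattern sketched earlier, with one wrinkle: 32-bit kernels can use the one-byte into instruction, while 64-bit kernels need jno plus an explicit int $4, because into is invalid in long mode; the operation is undone either inline (atomic.h style) or in the .fixup section reached via the exception table (as here).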
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h
+--- linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h 2010-09-17 20:12:09.000000000 -0400
+@@ -81,8 +81,8 @@ static inline unsigned char current_lock
+ #else
+ #define lock_cmos_prefix(reg) do {} while (0)
+ #define lock_cmos_suffix(reg) do {} while (0)
+-#define lock_cmos(reg)
+-#define unlock_cmos()
++#define lock_cmos(reg) do {} while (0)
++#define unlock_cmos() do {} while (0)
+ #define do_i_have_lock_cmos() 0
+ #define current_lock_cmos_reg() 0
+ #endif
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/microcode.h linux-2.6.35.4/arch/x86/include/asm/microcode.h
+--- linux-2.6.35.4/arch/x86/include/asm/microcode.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/microcode.h 2010-09-17 20:12:09.000000000 -0400
+@@ -12,13 +12,13 @@ struct device;
+ enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+
+ struct microcode_ops {
+- enum ucode_state (*request_microcode_user) (int cpu,
++ enum ucode_state (* const request_microcode_user) (int cpu,
+ const void __user *buf, size_t size);
+
+- enum ucode_state (*request_microcode_fw) (int cpu,
++ enum ucode_state (* const request_microcode_fw) (int cpu,
+ struct device *device);
+
+- void (*microcode_fini_cpu) (int cpu);
++ void (* const microcode_fini_cpu) (int cpu);
+
+ /*
+ * The generic 'microcode_core' part guarantees that
+@@ -38,18 +38,18 @@ struct ucode_cpu_info {
+ extern struct ucode_cpu_info ucode_cpu_info[];
+
+ #ifdef CONFIG_MICROCODE_INTEL
+-extern struct microcode_ops * __init init_intel_microcode(void);
++extern const struct microcode_ops * __init init_intel_microcode(void);
+ #else
+-static inline struct microcode_ops * __init init_intel_microcode(void)
++static inline const struct microcode_ops * __init init_intel_microcode(void)
+ {
+ return NULL;
+ }
+ #endif /* CONFIG_MICROCODE_INTEL */
+
+ #ifdef CONFIG_MICROCODE_AMD
+-extern struct microcode_ops * __init init_amd_microcode(void);
++extern const struct microcode_ops * __init init_amd_microcode(void);
+ #else
+-static inline struct microcode_ops * __init init_amd_microcode(void)
++static inline const struct microcode_ops * __init init_amd_microcode(void)
+ {
+ return NULL;
+ }
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/mman.h linux-2.6.35.4/arch/x86/include/asm/mman.h
+--- linux-2.6.35.4/arch/x86/include/asm/mman.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/mman.h 2010-09-17 20:12:09.000000000 -0400
+@@ -5,4 +5,14 @@
+
+ #include <asm-generic/mman.h>
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/mmu_context.h linux-2.6.35.4/arch/x86/include/asm/mmu_context.h
+--- linux-2.6.35.4/arch/x86/include/asm/mmu_context.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/mmu_context.h 2010-09-17 20:12:09.000000000 -0400
+@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ unsigned int i;
++ pgd_t *pgd;
++
++ pax_open_kernel();
++ pgd = get_cpu_pgd(smp_processor_id());
++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++ if (paravirt_enabled())
++ set_pgd(pgd+i, native_make_pgd(0));
++ else
++ pgd[i] = native_make_pgd(0);
++ pax_close_kernel();
++#endif
++
+ #ifdef CONFIG_SMP
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+@@ -34,27 +49,70 @@ static inline void switch_mm(struct mm_s
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ #ifdef CONFIG_SMP
++#ifdef CONFIG_X86_32
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* Re-load page tables */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#else
+ load_cr3(next->pgd);
++#endif
+
+ /*
+ * load the LDT, if the LDT is different:
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+- }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!(__supported_pte_mask & _PAGE_NX)) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+ #ifdef CONFIG_SMP
++ else if (unlikely(tlbstate != TLBSTATE_OK))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
++ }
+ else {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#endif
++
++#ifdef CONFIG_SMP
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+@@ -63,11 +121,28 @@ static inline void switch_mm(struct mm_s
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(next->pgd);
++#endif
++
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!(__supported_pte_mask & _PAGE_NX))
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
+- }
+ #endif
++ }
+ }
+
+ #define activate_mm(prev, next) \
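[Note] A hedged reading of the switch_mm() rework above: under PAX_PER_CPU_PGD each CPU owns a private pgd, so every switch clones the user half of next->pgd into it — and, with UDEREF on amd64, shadows those entries into the upper user slots — before loading CR3 with the per-CPU pgd; the 32-bit PAGEEXEC/SEGMEXEC arms additionally refresh the per-mm user %cs via set_user_cs() from the desc.h hunk.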
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/mmu.h linux-2.6.35.4/arch/x86/include/asm/mmu.h
+--- linux-2.6.35.4/arch/x86/include/asm/mmu.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/mmu.h 2010-09-17 20:12:09.000000000 -0400
+@@ -9,10 +9,23 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
++
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/module.h linux-2.6.35.4/arch/x86/include/asm/module.h
+--- linux-2.6.35.4/arch/x86/include/asm/module.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/module.h 2010-09-17 20:12:37.000000000 -0400
+@@ -59,13 +59,31 @@
+ #error unknown processor family
+ #endif
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
+ #ifdef CONFIG_X86_32
+ # ifdef CONFIG_4KSTACKS
+ # define MODULE_STACKSIZE "4KSTACKS "
+ # else
+ # define MODULE_STACKSIZE ""
+ # endif
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
++# ifdef CONFIG_PAX_KERNEXEC
++# define MODULE_PAX_KERNEXEC "KERNEXEC "
++# else
++# define MODULE_PAX_KERNEXEC ""
++# endif
++# ifdef CONFIG_GRKERNSEC
++# define MODULE_GRSEC "GRSECURITY "
++# else
++# define MODULE_GRSEC ""
++# endif
++# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
++#else
++# define MODULE_ARCH_VERMAGIC MODULE_PAX_UDEREF
+ #endif
+
+ #endif /* _ASM_X86_MODULE_H */
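[Note] A hedged expansion of the vermagic change above: a 32-bit build with CONFIG_4KSTACKS, CONFIG_GRKERNSEC, CONFIG_PAX_KERNEXEC and CONFIG_PAX_MEMORY_UDEREF all set ends up with the suffix "<proc family> 4KSTACKS GRSECURITY KERNEXEC UDEREF ", so modules built without the matching hardening options fail the vermagic check at load time; 64-bit builds contribute only the UDEREF token.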
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/page_32_types.h linux-2.6.35.4/arch/x86/include/asm/page_32_types.h
+--- linux-2.6.35.4/arch/x86/include/asm/page_32_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/page_32_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -15,6 +15,10 @@
+ */
+ #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
++#endif
++
+ #ifdef CONFIG_4KSTACKS
+ #define THREAD_ORDER 0
+ #else
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/paravirt.h linux-2.6.35.4/arch/x86/include/asm/paravirt.h
+--- linux-2.6.35.4/arch/x86/include/asm/paravirt.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/paravirt.h 2010-09-17 20:12:09.000000000 -0400
+@@ -720,6 +720,21 @@ static inline void __set_fixmap(unsigned
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+ static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+@@ -936,7 +951,7 @@ extern void default_banner(void);
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+@@ -1013,6 +1028,21 @@ extern void default_banner(void);
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
++
++#define GET_CR0_INTO_RDI \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0 \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3 \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* __ASSEMBLY__ */
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h
+--- linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -312,6 +312,12 @@ struct pv_mmu_ops {
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long (*pax_open_kernel)(void);
++ unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+
+ struct arch_spinlock;
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pci_x86.h linux-2.6.35.4/arch/x86/include/asm/pci_x86.h
+--- linux-2.6.35.4/arch/x86/include/asm/pci_x86.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pci_x86.h 2010-09-17 20:12:09.000000000 -0400
+@@ -91,16 +91,16 @@ extern int (*pcibios_enable_irq)(struct
+ extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+
+ struct pci_raw_ops {
+- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
++ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val);
+- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
++ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val);
+ };
+
+-extern struct pci_raw_ops *raw_pci_ops;
+-extern struct pci_raw_ops *raw_pci_ext_ops;
++extern const struct pci_raw_ops *raw_pci_ops;
++extern const struct pci_raw_ops *raw_pci_ext_ops;
+
+-extern struct pci_raw_ops pci_direct_conf1;
++extern const struct pci_raw_ops pci_direct_conf1;
+ extern bool port_cf9_safe;
+
+ /* arch_initcall level */
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgalloc.h linux-2.6.35.4/arch/x86/include/asm/pgalloc.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgalloc.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgalloc.h 2010-09-17 20:12:09.000000000 -0400
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h 2010-09-17 20:12:09.000000000 -0400
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -25,8 +25,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -47,6 +45,11 @@ extern void set_pmd_pfn(unsigned long, u
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define __KM_PTE \
+ (in_nmi() ? KM_NMI_PTE : \
+@@ -71,7 +74,9 @@ extern void set_pmd_pfn(unsigned long, u
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -83,6 +88,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
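[Note] Reading the new macros: under 32-bit KERNEXEC the kernel text is reachable at two addresses, and ktla_ktva()/ktva_ktla() translate between the text's linear address and its virtual alias by adding or subtracting LOAD_PHYSICAL_ADDR + PAGE_OFFSET — with assumed defaults CONFIG_PHYSICAL_START = 0x1000000 and PAGE_OFFSET = 0xC0000000, a fixed offset of 0xC1000000.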
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h 2010-09-17 20:12:09.000000000 -0400
+@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++ pax_close_kernel();
+ }
+
+ /*
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h 2010-09-17 20:12:09.000000000 -0400
+@@ -16,10 +16,13 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+
+ #define swapper_pg_dir init_level4_pgt
+
+@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
+
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++ pax_open_kernel();
+ *pgdp = pgd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pgd_clear(pgd_t *pgd)
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+ #define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
+
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable.h linux-2.6.35.4/arch/x86/include/asm/pgtable.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
+@@ -76,12 +76,51 @@ extern struct list_head pgd_list;
+
+ #define arch_end_context_switch(prev) do {} while(0)
+
++#define pax_open_kernel() native_pax_open_kernel()
++#define pax_close_kernel() native_pax_close_kernel()
+ #endif /* CONFIG_PARAVIRT */
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++ unsigned long cr0;
++
++ preempt_disable();
++ barrier();
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(cr0 & X86_CR0_WP));
++ write_cr0(cr0);
++ return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++ unsigned long cr0;
++
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
++ write_cr0(cr0);
++ barrier();
++ preempt_enable_no_resched();
++ return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -169,9 +208,29 @@ static inline pte_t pte_wrprotect(pte_t
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -304,6 +363,15 @@ pte_t *populate_extra_pte(unsigned long
+ #endif
+
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
++static inline pgd_t *get_cpu_pgd(unsigned int cpu)
++{
++ return cpu_pgd[cpu];
++}
++#endif
++
+ #include <linux/mm_types.h>
+
+ static inline int pte_none(pte_t pte)
+@@ -474,7 +542,7 @@ static inline pud_t *pud_offset(pgd_t *p
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -497,7 +565,12 @@ static inline int pgd_none(pgd_t pgd)
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
++#endif
++
+ /*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+@@ -508,6 +581,20 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
++#else
++#define PAX_USER_SHADOW_BASE (_AC(0,UL))
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+
+ extern int direct_gbpages;
+@@ -613,11 +700,23 @@ static inline void ptep_set_wrprotect(st
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++ pax_open_kernel();
++ while (count--)
++ *dst++ = *src++;
++ pax_close_kernel();
+ }
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
++#endif
+
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
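[Note] native_pax_open_kernel()/native_pax_close_kernel() above are the primitives every KERNEXEC write in this patch is bracketed with (see the desc.h and pgtable-*level.h hunks): preemption off, CR0.WP cleared, write performed, WP restored. A hedged caller-side sketch (kernel context assumed, names ours):

/* Update an otherwise read-only kernel table, desc.h-style. */
static void example_set_entry(unsigned long *table, int idx, unsigned long val)
{
	pax_open_kernel();	/* preempt_disable() + clear CR0.WP */
	table[idx] = val;
	pax_close_kernel();	/* restore CR0.WP + preempt_enable() */
}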
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h
+--- linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h 2010-09-17 20:12:09.000000000 -0400
+@@ -16,12 +16,11 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -39,7 +38,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -55,8 +53,10 @@
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -93,6 +93,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -103,8 +106,8 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+@@ -163,8 +166,8 @@
+ * bits are combined, this will allow the user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
+ {
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
+
++#if PAGETABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if PAGETABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if PAGETABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+
+@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
+ return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+ return native_pgd_val(pud.pgd);
+@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
+ return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+ return native_pgd_val(pmd.pud.pgd);
+@@ -278,7 +287,6 @@ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
+-extern int nx_enabled;
+
+ #define pgprot_writecombine pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
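The pgtable_types.h hunk also redefines _PAGE_NX for CPUs without hardware NX (32-bit non-PAE): instead of a zero constant, it reuses the software-only bit 11 (_PAGE_BIT_HIDDEN) so the rest of the kernel can set and test non-executability uniformly while PAGEEXEC enforces it in software. A sketch of the uniform test, with illustrative values:

/* Soft-NX sketch: bit 11 is a software PTE bit on x86, so it can carry
 * the NX intent even where the hardware bit 63 does not exist. */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define _PAGE_BIT_HIDDEN 11
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_HIDDEN)

static bool pte_exec(pteval_t pte)
{
	return !(pte & _PAGE_NX);    /* same test, hardware NX or not */
}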
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/processor.h linux-2.6.35.4/arch/x86/include/asm/processor.h
+--- linux-2.6.35.4/arch/x86/include/asm/processor.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/processor.h 2010-09-17 20:12:09.000000000 -0400
+@@ -269,7 +269,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -884,8 +884,15 @@ static inline void spin_lock_prefetch(co
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+ .sp0 = sizeof(init_stack) + (long)&init_stack, \
+@@ -902,7 +909,7 @@ static inline void spin_lock_prefetch(co
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -913,11 +920,7 @@ static inline void spin_lock_prefetch(co
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((info)->task.thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -932,7 +935,7 @@ extern unsigned long thread_saved_pc(str
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -942,13 +945,13 @@ extern unsigned long thread_saved_pc(str
+ /*
+ * User space process size. 47 bits minus one guard page.
+ */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -985,6 +988,10 @@ extern void start_thread(struct pt_regs
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
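Under CONFIG_PAX_SEGMEXEC the processor.h hunk halves the 32-bit user address space so the upper half can mirror code for execute-only enforcement, which is why STACK_TOP becomes flag-dependent while STACK_TOP_MAX stays at TASK_SIZE. The arithmetic in miniature (PAGE_OFFSET and the flag test are simplified stand-ins):

/* SEGMEXEC split sketch: a flag selects the halved task size. */
#include <stdio.h>

#define PAGE_OFFSET        0xc0000000UL
#define TASK_SIZE          PAGE_OFFSET
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)

static unsigned long stack_top(int segmexec)
{
	return segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;
}

int main(void)
{
	printf("STACK_TOP: %#lx normal, %#lx under SEGMEXEC\n",
	       stack_top(0), stack_top(1));
	return 0;
}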
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/ptrace.h linux-2.6.35.4/arch/x86/include/asm/ptrace.h
+--- linux-2.6.35.4/arch/x86/include/asm/ptrace.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/ptrace.h 2010-09-17 20:12:09.000000000 -0400
+@@ -152,28 +152,29 @@ static inline unsigned long regs_return_
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
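The ptrace.h change is a rename with teeth: user_mode() becomes the safe, v8086-aware predicate, and the cheap RPL-only check must now be requested explicitly as user_mode_novm(), so callers fail safe by default. A stand-alone rendering of the two tests, taking raw register values instead of a struct pt_regs:

/* user_mode predicates in isolation; constants match the x86 meanings. */
#include <stdbool.h>
#include <stdint.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      0x00020000U   /* EFLAGS.VM: v8086 mode */

static bool user_mode_novm(uint16_t cs)
{
	return (cs & SEGMENT_RPL_MASK) == USER_RPL;
}

static bool user_mode(uint16_t cs, uint32_t eflags)
{
	/* v8086 tasks count as user mode even though their CS RPL may be 0 */
	return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
}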
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/reboot.h linux-2.6.35.4/arch/x86/include/asm/reboot.h
+--- linux-2.6.35.4/arch/x86/include/asm/reboot.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/reboot.h 2010-09-17 20:12:09.000000000 -0400
+@@ -18,7 +18,7 @@ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(const unsigned char *code, int length);
++void machine_real_restart(const unsigned char *code, unsigned int length);
+
+ typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/rwsem.h linux-2.6.35.4/arch/x86/include/asm/rwsem.h
+--- linux-2.6.35.4/arch/x86/include/asm/rwsem.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/rwsem.h 2010-09-17 20:12:09.000000000 -0400
+@@ -118,10 +118,26 @@ static inline void __down_read(struct rw
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX _ASM_DEC "(%1)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* adds 0x00000001, returns the old value */
+- " jns 1f\n"
++ " jns 2f\n"
+ " call call_rwsem_down_read_failed\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending down_read\n\t"
+ : "+m" (sem->count)
+ : "a" (sem)
+@@ -136,13 +152,29 @@ static inline int __down_read_trylock(st
+ rwsem_count_t result, tmp;
+ asm volatile("# beginning __down_read_trylock\n\t"
+ " mov %0,%1\n\t"
+- "1:\n\t"
++ "2:\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
+- " jle 2f\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "sub %3,%2\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ " jle 3f\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+- " jnz 1b\n\t"
+- "2:\n\t"
++ " jnz 2b\n\t"
++ "3:\n\t"
+ "# ending __down_read_trylock\n\t"
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "i" (RWSEM_ACTIVE_READ_BIAS)
+@@ -160,12 +192,28 @@ static inline void __down_write_nested(s
+ tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "mov %1,(%2)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* subtract 0x0000ffff, returns the old value */
+ " test %1,%1\n\t"
+ /* was the count 0 before? */
+- " jz 1f\n"
++ " jz 2f\n"
+ " call call_rwsem_down_write_failed\n"
+- "1:\n"
++ "2:\n"
+ "# ending down_write"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+@@ -198,10 +246,26 @@ static inline void __up_read(struct rw_s
+ rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "mov %1,(%2)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* subtracts 1, returns the old value */
+- " jns 1f\n\t"
++ " jns 2f\n\t"
+ " call call_rwsem_wake\n"
+- "1:\n"
++ "2:\n"
+ "# ending __up_read\n"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
+@@ -216,11 +280,27 @@ static inline void __up_write(struct rw_
+ rwsem_count_t tmp;
+ asm volatile("# beginning __up_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "mov %1,(%2)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /* tries to transition
+ 0xffff0001 -> 0x00000000 */
+- " jz 1f\n"
++ " jz 2f\n"
+ " call call_rwsem_wake\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending __up_write\n"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+@@ -234,13 +314,29 @@ static inline void __downgrade_write(str
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+ */
+- " jns 1f\n\t"
++ " jns 2f\n\t"
+ " call call_rwsem_downgrade_wake\n"
+- "1:\n\t"
++ "2:\n\t"
+ "# ending __downgrade_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+@@ -253,7 +349,23 @@ static inline void __downgrade_write(str
+ static inline void rwsem_atomic_add(rwsem_count_t delta,
+ struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX _ASM_SUB "%1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (sem->count)
+ : "er" (delta));
+ }
+@@ -266,7 +378,23 @@ static inline rwsem_count_t rwsem_atomic
+ {
+ rwsem_count_t tmp = delta;
+
+- asm volatile(LOCK_PREFIX "xadd %0,%1"
++ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ "mov %0,%1\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+r" (tmp), "+m" (sem->count)
+ : : "memory");
+
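Every rwsem fast path above gains the same CONFIG_PAX_REFCOUNT skeleton: run the atomic operation, branch to a .fixup stub on signed overflow (into on i386, jno plus int $4 on amd64), and have the stub undo the operation before the overflow is reported. A user-space approximation, assuming x86 and GCC inline asm, with a plain function call and abort() standing in for the exception-table machinery:

/* Overflow-trapping increment in the PAX_REFCOUNT style (x86 only). */
#include <stdio.h>
#include <stdlib.h>

static void refcount_overflow(int *count)
{
	__atomic_fetch_sub(count, 1, __ATOMIC_SEQ_CST);  /* fixup: undo */
	fprintf(stderr, "refcount overflow detected\n");
	abort();
}

static void refcount_inc(int *count)
{
	unsigned char of;

	asm volatile("lock incl %0\n\t"
		     "seto %1"           /* OF set => signed overflow */
		     : "+m" (*count), "=q" (of) : : "cc");
	if (of)
		refcount_overflow(count);
}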
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/segment.h linux-2.6.35.4/arch/x86/include/asm/segment.h
+--- linux-2.6.35.4/arch/x86/include/asm/segment.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/segment.h 2010-09-17 20:12:09.000000000 -0400
+@@ -62,8 +62,8 @@
+ * 26 - ESPFIX small SS
+ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - stack_canary-20 [ for stack protector ]
+- * 29 - unused
+- * 30 - unused
++ * 29 - PCI BIOS CS
++ * 30 - PCI BIOS DS
+ * 31 - TSS for double fault handler
+ */
+ #define GDT_ENTRY_TLS_MIN 6
+@@ -77,6 +77,8 @@
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
++
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
+@@ -88,7 +90,7 @@
+ #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
+ #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+
+-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
++#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
+ #ifdef CONFIG_SMP
+ #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
+ #else
+@@ -102,6 +104,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -139,7 +147,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+@@ -163,6 +171,8 @@
+ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
+ #define __USER32_DS __USER_DS
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
++
+ #define GDT_ENTRY_TSS 8 /* needs two entries */
+ #define GDT_ENTRY_LDT 10 /* needs two entries */
+ #define GDT_ENTRY_TLS_MIN 12
+@@ -183,6 +193,7 @@
+ #endif
+
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/spinlock.h linux-2.6.35.4/arch/x86/include/asm/spinlock.h
+--- linux-2.6.35.4/arch/x86/include/asm/spinlock.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/spinlock.h 2010-09-17 20:12:09.000000000 -0400
+@@ -249,18 +249,50 @@ static inline int arch_write_can_lock(ar
+ static inline void arch_read_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+- "jns 1f\n"
+- "call __read_lock_failed\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
+ "1:\n"
++ LOCK_PREFIX " addl $1,(%0)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "jns 2f\n"
++ "call __read_lock_failed\n\t"
++ "2:\n"
+ ::LOCK_PTR_REG (rw) : "memory");
+ }
+
+ static inline void arch_write_lock(arch_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
+- "jz 1f\n"
+- "call __write_lock_failed\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
+ "1:\n"
++ LOCK_PREFIX " addl %1,(%0)\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ "jz 2f\n"
++ "call __write_lock_failed\n\t"
++ "2:\n"
+ ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+@@ -286,12 +318,45 @@ static inline int arch_write_trylock(arc
+
+ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "decl %0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
++ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void arch_write_unlock(arch_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "addl %1, %0"
++ asm volatile(LOCK_PREFIX "addl %1, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++#ifdef CONFIG_X86_32
++ "into\n0:\n"
++#else
++ "jno 0f\n"
++ "int $4\n0:\n"
++#endif
++ ".pushsection .fixup,\"ax\"\n"
++ "1:\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "jmp 0b\n"
++ ".popsection\n"
++ _ASM_EXTABLE(0b, 1b)
++#endif
++
+ : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/system.h linux-2.6.35.4/arch/x86/include/asm/system.h
+--- linux-2.6.35.4/arch/x86/include/asm/system.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/system.h 2010-09-17 20:12:09.000000000 -0400
+@@ -202,7 +202,7 @@ static inline unsigned long get_limit(un
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ static inline void native_clts(void)
+@@ -342,7 +342,7 @@ void enable_hlt(void);
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
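Two small semantic shifts in system.h: get_limit() now returns the raw lsll result (the highest valid offset rather than the segment size), and arch_align_stack() collapses from a randomizing function into a pure 16-byte alignment mask. The mask is simple enough to verify directly:

/* arch_align_stack() as a deterministic 16-byte mask. */
#include <assert.h>

static unsigned long arch_align_stack(unsigned long sp)
{
	return sp & ~0xfUL;
}

int main(void)
{
	assert(arch_align_stack(0xbfffff37UL) == 0xbfffff30UL);
	assert(arch_align_stack(0xbfffff30UL) == 0xbfffff30UL);
	return 0;
}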
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h
+--- linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h 2010-09-17 20:12:09.000000000 -0400
+@@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_u
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to,
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_to_user_ll(to, from, n);
+ }
+
+@@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const vo
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -138,6 +146,10 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __
+ return ret;
+ }
+ }
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_from_user_ll(to, from, n);
+ }
+
+@@ -160,6 +174,10 @@ static __always_inline unsigned long __c
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -182,15 +200,19 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
+-}
++ if ((long)n < 0)
++ return n;
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check _copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
++}
+
++extern void copy_to_user_overflow(void)
++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
++ __compiletime_error("copy_to_user() buffer size is not provably correct")
++#else
++ __compiletime_warning("copy_to_user() buffer size is not provably correct")
++#endif
++;
+
+ extern void copy_from_user_overflow(void)
+ #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+@@ -200,17 +222,61 @@ extern void copy_from_user_overflow(void
+ #endif
+ ;
+
+-static inline unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n)
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ int sz = __compiletime_object_size(from);
++
++ if (unlikely(sz != -1 && sz < n))
++ copy_to_user_overflow();
++ else if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ int sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+- else
++ if (unlikely(sz != -1 && sz < n))
+ copy_from_user_overflow();
+-
++ else if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h
+--- linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h 2010-09-17 20:12:37.000000000 -0400
+@@ -11,6 +11,11 @@
+ #include <asm/alternative.h>
+ #include <asm/cpufeature.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
++
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
++
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
+
+ /*
+ * Copy To/From Userspace
+@@ -37,26 +42,26 @@ copy_user_generic(void *to, const void *
+ return ret;
+ }
+
+-__must_check unsigned long
+-_copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-_copy_from_user(void *to, const void __user *from, unsigned len);
++static __always_inline __must_check unsigned long
++__copy_to_user(void __user *to, const void *from, unsigned len);
++static __always_inline __must_check unsigned long
++__copy_from_user(void *to, const void __user *from, unsigned len);
+ __must_check unsigned long
+ copy_in_user(void __user *to, const void __user *from, unsigned len);
+
+ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+- unsigned long n)
++ unsigned n)
+ {
+- int sz = __compiletime_object_size(to);
+-
+ might_fault();
+- if (likely(sz == -1 || sz >= n))
+- n = _copy_from_user(to, from, n);
+-#ifdef CONFIG_DEBUG_VM
+- else
+- WARN(1, "Buffer overflow detected!\n");
+-#endif
++
++ if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((int)n > 0) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ memset(to, 0, n);
++ }
+ return n;
+ }
+
+@@ -65,17 +70,35 @@ int copy_to_user(void __user *dst, const
+ {
+ might_fault();
+
+- return _copy_to_user(dst, src, size);
++ if (access_ok(VERIFY_WRITE, dst, size))
++ size = __copy_to_user(dst, src, size);
++ return size;
+ }
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(dst);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(dst, size, false);
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
+ return copy_user_generic(dst, (__force void *)src, size);
++ }
+ switch (size) {
+ case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+@@ -108,18 +131,36 @@ int __copy_from_user(void *dst, const vo
+ ret, "q", "", "=r", 8);
+ return ret;
+ default:
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
+ return copy_user_generic(dst, (__force void *)src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
+ {
+- int ret = 0;
++ int sz = __compiletime_object_size(src);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++ if (unlikely(sz != -1 && sz < size)) {
++#ifdef CONFIG_DEBUG_VM
++ WARN(1, "Buffer overflow detected!\n");
++#endif
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ check_object_size(src, size, true);
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
+ return copy_user_generic((__force void *)dst, src, size);
++ }
+ switch (size) {
+ case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+@@ -152,19 +193,30 @@ int __copy_to_user(void __user *dst, con
+ ret, "q", "", "er", 8);
+ return ret;
+ default:
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ {
+- int ret = 0;
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
++
++ if ((int)size < 0)
++ return size;
++
++ if (!__builtin_constant_p(size)) {
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
++ }
+ switch (size) {
+ case 1: {
+ u8 tmp;
+@@ -204,6 +256,10 @@ int __copy_in_user(void __user *dst, con
+ return ret;
+ }
+ default:
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
+ }
+@@ -222,33 +278,45 @@ __must_check unsigned long __clear_user(
+ static __must_check __always_inline int
+ __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+ {
++ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
++ src += PAX_USER_SHADOW_BASE;
+ return copy_user_generic(dst, (__force const void *)src, size);
+ }
+
+-static __must_check __always_inline int
++static __must_check __always_inline unsigned long
+ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
++ dst += PAX_USER_SHADOW_BASE;
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
+ unsigned size, int zerorest);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+ {
+ might_sleep();
++
++ if ((int)size < 0)
++ return size;
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+ unsigned size)
+ {
++ if ((int)size < 0)
++ return size;
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
++extern unsigned long
+ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
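The recurring "if ((unsigned long)p < PAX_USER_SHADOW_BASE) p += PAX_USER_SHADOW_BASE;" lines are the amd64 UDEREF core: legitimate user accesses go through a shifted alias of user space, so a stray kernel dereference of an unshifted user pointer can be caught. The remap in isolation, with an assumed base value:

/* UDEREF shadow remap sketch; the base value is illustrative. */
#include <stdint.h>

#define PAX_USER_SHADOW_BASE (1UL << 47)   /* assumed shift */

static const void *uderef_remap(const void *uptr)
{
	uintptr_t p = (uintptr_t)uptr;

	if (p < PAX_USER_SHADOW_BASE)
		p += PAX_USER_SHADOW_BASE;   /* alias used for the access */
	return (const void *)p;
}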
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess.h linux-2.6.35.4/arch/x86/include/asm/uaccess.h
+--- linux-2.6.35.4/arch/x86/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
+@@ -8,12 +8,15 @@
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+@@ -29,7 +32,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#ifdef CONFIG_X86_32
++void __set_fs(mm_segment_t x, int cpu);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -77,7 +85,33 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while(__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ __addr = __addr_ao; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -183,13 +217,21 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#ifdef CONFIG_X86_32
++#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
++#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
++#else
++#define _ASM_LOAD_USER_DS(ds)
++#define _ASM_LOAD_KERNEL_DS
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: movl %%eax,%%ds:0(%2)\n" \
++ "2: movl %%edx,%%ds:4(%2)\n" \
+ "3:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+ " jmp 3b\n" \
+@@ -197,15 +239,18 @@ extern int __get_user_bad(void);
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=r" (err) \
+- : "A" (x), "r" (addr), "i" (errret), "0" (err))
++ : "A" (x), "r" (addr), "i" (errret), "0" (err), \
++ "r"(__USER_DS))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: movl %%eax,%%ds:0(%1)\n" \
++ "2: movl %%edx,%%ds:4(%1)\n" \
+ "3:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+- : : "A" (x), "r" (addr))
++ : : "A" (x), "r" (addr), "r"(__USER_DS))
+
+ #define __put_user_x8(x, ptr, __ret_pu) \
+ asm volatile("call __put_user_8" : "=a" (__ret_pu) \
+@@ -374,16 +419,18 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: mov"itype" %%ds:%2,%"rtype"1\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
+- : "m" (__m(addr)), "i" (errret), "0" (err))
++ : "=r" (err), ltype (x) \
++ : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
+
+ #define __get_user_size_ex(x, ptr, size) \
+ do { \
+@@ -407,10 +454,12 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: mov"itype" %%ds:%1,%"rtype"0\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+- : ltype(x) : "m" (__m(addr)))
++ : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
+
+ #define __put_user_nocheck(x, ptr, size) \
+ ({ \
+@@ -424,13 +473,24 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x) \
++({ \
++ unsigned long ____x = (unsigned long)(x); \
++ if (____x < PAX_USER_SHADOW_BASE) \
++ ____x += PAX_USER_SHADOW_BASE; \
++ (void __user *)____x; \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+
+ /*
+ * Tell gcc we read from memory instead of writing: this is because
+@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile(_ASM_LOAD_USER_DS(5) \
++ "1: mov"itype" %"rtype"1,%%ds:%2\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
++ "r"(__USER_DS))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile(_ASM_LOAD_USER_DS(2) \
++ "1: mov"itype" %"rtype"0,%%ds:%1\n" \
+ "2:\n" \
++ _ASM_LOAD_KERNEL_DS \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+- : : ltype(x), "m" (__m(addr)))
++ : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
+
+ /*
+ * uaccess_try and catch
+@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
+@@ -567,6 +632,7 @@ extern struct movsl_mask {
+
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+
++#define ARCH_HAS_SORT_EXTABLE
+ #ifdef CONFIG_X86_32
+ # include "uaccess_32.h"
+ #else
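The reworked access_ok() above does more than a range test: when a range spans a page boundary it walks it page by page, reading (and, for VERIFY_WRITE, rewriting) one byte per page so the whole range is faulted in up front. A user-space rendering of the walk, with probe_byte() standing in for __get_user()/__put_user():

/* Prefault walk sketch: touch one byte per page across [addr, addr+size). */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static bool probe_byte(uintptr_t addr) { (void)addr; return true; }

static bool prefault_range(uintptr_t addr, unsigned long size)
{
	uintptr_t page = addr & PAGE_MASK;
	uintptr_t end  = addr + size - 1;

	while (page <= end) {
		page += PAGE_SIZE;
		if (!probe_byte(addr))   /* one touch per page */
			return false;
		addr = page;             /* next touch is on the next page */
	}
	return true;
}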
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/vgtod.h linux-2.6.35.4/arch/x86/include/asm/vgtod.h
+--- linux-2.6.35.4/arch/x86/include/asm/vgtod.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/vgtod.h 2010-09-17 20:12:09.000000000 -0400
+@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
+ int sysctl_enabled;
+ struct timezone sys_tz;
+ struct { /* extract of a clocksource struct */
++ char name[8];
+ cycle_t (*vread)(void);
+ cycle_t cycle_last;
+ cycle_t mask;
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/vmi.h linux-2.6.35.4/arch/x86/include/asm/vmi.h
+--- linux-2.6.35.4/arch/x86/include/asm/vmi.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/vmi.h 2010-09-17 20:12:09.000000000 -0400
+@@ -191,6 +191,7 @@ struct vrom_header {
+ u8 reserved[96]; /* Reserved for headers */
+ char vmi_init[8]; /* VMI_Init jump point */
+ char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
++ char rom_data[8048]; /* rest of the option ROM */
+ } __attribute__((packed));
+
+ struct pnp_header {
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/vsyscall.h linux-2.6.35.4/arch/x86/include/asm/vsyscall.h
+--- linux-2.6.35.4/arch/x86/include/asm/vsyscall.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/vsyscall.h 2010-09-17 20:12:09.000000000 -0400
+@@ -15,9 +15,10 @@ enum vsyscall_num {
+
+ #ifdef __KERNEL__
+ #include <linux/seqlock.h>
++#include <linux/getcpu.h>
++#include <linux/time.h>
+
+ #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
+-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
+
+ /* Definitions for CONFIG_GENERIC_TIME definitions */
+ #define __section_vsyscall_gtod_data __attribute__ \
+@@ -31,7 +32,6 @@ enum vsyscall_num {
+ #define VGETCPU_LSL 2
+
+ extern int __vgetcpu_mode;
+-extern volatile unsigned long __jiffies;
+
+ /* kernel space (writeable) */
+ extern int vgetcpu_mode;
+@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
+
+ extern void map_vsyscall(void);
+
++extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
++extern time_t vtime(time_t *t);
++extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff -urNp linux-2.6.35.4/arch/x86/include/asm/xsave.h linux-2.6.35.4/arch/x86/include/asm/xsave.h
+--- linux-2.6.35.4/arch/x86/include/asm/xsave.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/include/asm/xsave.h 2010-09-17 20:12:09.000000000 -0400
+@@ -59,6 +59,12 @@ static inline int fpu_xrstor_checking(st
+ static inline int xsave_user(struct xsave_struct __user *buf)
+ {
+ int err;
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
++ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
++#endif
++
+ __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+@@ -85,6 +91,11 @@ static inline int xrestore_user(struct x
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
++ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
++#endif
++
+ __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+diff -urNp linux-2.6.35.4/arch/x86/Kconfig linux-2.6.35.4/arch/x86/Kconfig
+--- linux-2.6.35.4/arch/x86/Kconfig 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/Kconfig 2010-09-17 20:12:37.000000000 -0400
+@@ -1038,7 +1038,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1075,7 +1075,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1129,7 +1129,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1461,7 +1461,7 @@ config ARCH_USES_PG_UNCACHED
+
+ config EFI
+ bool "EFI runtime service support"
+- depends on ACPI
++ depends on ACPI && !PAX_KERNEXEC
+ ---help---
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+@@ -1548,6 +1548,7 @@ config KEXEC_JUMP
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+ default "0x1000000"
++ range 0x400000 0x40000000
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1611,6 +1612,7 @@ config X86_NEED_RELOCS
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x400000 0x1000000 if PAX_KERNEXEC
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1642,9 +1644,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+
+diff -urNp linux-2.6.35.4/arch/x86/Kconfig.cpu linux-2.6.35.4/arch/x86/Kconfig.cpu
+--- linux-2.6.35.4/arch/x86/Kconfig.cpu 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/Kconfig.cpu 2010-09-17 20:12:09.000000000 -0400
+@@ -336,7 +336,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -360,7 +360,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -406,7 +406,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff -urNp linux-2.6.35.4/arch/x86/Kconfig.debug linux-2.6.35.4/arch/x86/Kconfig.debug
+--- linux-2.6.35.4/arch/x86/Kconfig.debug 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/Kconfig.debug 2010-09-17 20:12:09.000000000 -0400
+@@ -97,7 +97,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/boot.c linux-2.6.35.4/arch/x86/kernel/acpi/boot.c
+--- linux-2.6.35.4/arch/x86/kernel/acpi/boot.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/acpi/boot.c 2010-09-17 20:12:09.000000000 -0400
+@@ -1472,7 +1472,7 @@ static struct dmi_system_id __initdata a
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
+ },
+ },
+- {}
++ { NULL, NULL, {{0, {0}}}, NULL}
+ };
+
+ /*
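The dmi_system_id table terminator changes from {} to a fully spelled-out sentinel (the apm_32.c table further down gets the same treatment); with the patch's wider constification and stricter initializers, the explicit form keeps the end-of-table entry unambiguous. The same sentinel-walk pattern in miniature, with a stand-in entry type:

/* Sentinel-terminated table sketch. */
#include <stddef.h>
#include <string.h>

struct match_entry {
	const char *vendor;
	const char *product;
};

static const struct match_entry quirk_table[] = {
	{ "HP", "Compaq 6715b" },
	{ NULL, NULL },              /* explicit end-of-table sentinel */
};

static const struct match_entry *find_vendor(const char *vendor)
{
	const struct match_entry *e;

	for (e = quirk_table; e->vendor != NULL; e++)  /* stop at sentinel */
		if (strcmp(e->vendor, vendor) == 0)
			return e;
	return NULL;
}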
+diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S
+--- linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S 2010-09-17 20:12:09.000000000 -0400
+@@ -104,7 +104,7 @@ _start:
+ movl %eax, %ecx
+ orl %edx, %ecx
+ jz 1f
+- movl $0xc0000080, %ecx
++ mov $MSR_EFER, %ecx
+ wrmsr
+ 1:
+
+diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c
+--- linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c 2010-09-17 20:12:09.000000000 -0400
+@@ -11,11 +11,12 @@
+ #include <linux/cpumask.h>
+ #include <asm/segment.h>
+ #include <asm/desc.h>
++#include <asm/e820.h>
+
+ #include "realmode/wakeup.h"
+ #include "sleep.h"
+
+-unsigned long acpi_wakeup_address;
++unsigned long acpi_wakeup_address = 0x2000;
+ unsigned long acpi_realmode_flags;
+
+ /* address in low memory of the wakeup routine. */
+@@ -96,8 +97,12 @@ int acpi_save_state_mem(void)
+ header->trampoline_segment = setup_trampoline() >> 4;
+ #ifdef CONFIG_SMP
+ stack_start.sp = temp_stack + sizeof(temp_stack);
++
++ pax_open_kernel();
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++ pax_close_kernel();
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S
+--- linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S 2010-09-17 20:12:09.000000000 -0400
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff -urNp linux-2.6.35.4/arch/x86/kernel/alternative.c linux-2.6.35.4/arch/x86/kernel/alternative.c
+--- linux-2.6.35.4/arch/x86/kernel/alternative.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/alternative.c 2010-09-17 20:12:09.000000000 -0400
+@@ -247,7 +247,7 @@ static void alternatives_smp_lock(const
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn DS segment override prefix into lock prefix */
+- if (*ptr == 0x3e)
++ if (*ktla_ktva(ptr) == 0x3e)
+ text_poke(ptr, ((unsigned char []){0xf0}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -268,7 +268,7 @@ static void alternatives_smp_unlock(cons
+ if (!*poff || ptr < text || ptr >= text_end)
+ continue;
+ /* turn lock prefix into DS segment override prefix */
+- if (*ptr == 0xf0)
++ if (*ktla_ktva(ptr) == 0xf0)
+ text_poke(ptr, ((unsigned char []){0x3E}), 1);
+ };
+ mutex_unlock(&text_mutex);
+@@ -436,7 +436,7 @@ void __init_or_module apply_paravirt(str
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -504,7 +504,7 @@ void __init alternative_instructions(voi
+ if (smp_alt_once)
+ free_init_pages("SMP alternatives",
+ (unsigned long)__smp_locks,
+- (unsigned long)__smp_locks_end);
++ PAGE_ALIGN((unsigned long)__smp_locks_end));
+
+ restart_nmi();
+ }
+@@ -521,13 +521,17 @@ void __init alternative_instructions(voi
+ * instructions. And on the local CPU you need to be protected against NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
++static void *__kprobes text_poke_early(void *addr, const void *opcode,
+ size_t len)
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++ pax_open_kernel();
++ memcpy(ktla_ktva(addr), opcode, len);
+ sync_core();
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+ that causes hangs on some VIA CPUs. */
+@@ -549,36 +553,22 @@ static void *__init_or_module text_poke_
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
+
+ if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]);
+ return addr;
+ }
+
+diff -urNp linux-2.6.35.4/arch/x86/kernel/amd_iommu.c linux-2.6.35.4/arch/x86/kernel/amd_iommu.c
+--- linux-2.6.35.4/arch/x86/kernel/amd_iommu.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/amd_iommu.c 2010-09-17 20:12:09.000000000 -0400
+@@ -2284,7 +2284,7 @@ static void prealloc_protection_domains(
+ }
+ }
+
+-static struct dma_map_ops amd_iommu_dma_ops = {
++static const struct dma_map_ops amd_iommu_dma_ops = {
+ .alloc_coherent = alloc_coherent,
+ .free_coherent = free_coherent,
+ .map_page = map_page,
+diff -urNp linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c
+--- linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c 2010-09-17 20:12:09.000000000 -0400
+@@ -691,7 +691,7 @@ struct IO_APIC_route_entry **alloc_ioapi
+ ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
+ GFP_ATOMIC);
+ if (!ioapic_entries)
+- return 0;
++ return NULL;
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ ioapic_entries[apic] =
+@@ -708,7 +708,7 @@ nomem:
+ kfree(ioapic_entries[apic]);
+ kfree(ioapic_entries);
+
+- return 0;
++ return NULL;
+ }
+
+ /*
+@@ -1118,7 +1118,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+ /* Used so that the online set of cpus does not change
+ * during assign_irq_vector.
+@@ -1126,7 +1126,7 @@ void lock_vector_lock(void)
+ raw_spin_lock(&vector_lock);
+ }
+
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+ raw_spin_unlock(&vector_lock);
+ }
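The __acquires/__releases annotations added to lock_vector_lock()/unlock_vector_lock() are sparse lock-context markers rather than runtime code: they expand to nothing under a normal compile and to context attributes under sparse. A minimal pthread example of the pattern; the macro definitions mirror the kernel's compiler.h versions:

/* Sparse lock-context annotations outside the kernel. */
#ifdef __CHECKER__
# define __acquires(x) __attribute__((context(x, 0, 1)))
# define __releases(x) __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

#include <pthread.h>

static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_vector_lock(void) __acquires(vector_lock)
{
	pthread_mutex_lock(&vector_lock);
}

static void unlock_vector_lock(void) __releases(vector_lock)
{
	pthread_mutex_unlock(&vector_lock);
}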
+diff -urNp linux-2.6.35.4/arch/x86/kernel/apm_32.c linux-2.6.35.4/arch/x86/kernel/apm_32.c
+--- linux-2.6.35.4/arch/x86/kernel/apm_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/apm_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+ return error;
+ }
+@@ -975,7 +989,7 @@ recalc:
+
+ static void apm_power_off(void)
+ {
+- unsigned char po_bios_call[] = {
++ const unsigned char po_bios_call[] = {
+ 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
+ 0x8e, 0xd0, /* movw ax,ss */
+ 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
+@@ -1931,7 +1945,10 @@ static const struct file_operations apm_
+ static struct miscdevice apm_device = {
+ APM_MINOR_DEV,
+ "apm_bios",
+- &apm_bios_fops
++ &apm_bios_fops,
++ {NULL, NULL},
++ NULL,
++ NULL
+ };
+
+
+@@ -2252,7 +2269,7 @@ static struct dmi_system_id __initdata a
+ { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
+ },
+
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
+ };
+
+ /*
+@@ -2355,12 +2372,15 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++ pax_open_kernel();
+ set_desc_base(&gdt[APM_CS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+ set_desc_base(&gdt[APM_CS_16 >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_desc_base(&gdt[APM_DS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++ pax_close_kernel();
+
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
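
Note on the apm_32.c hunks above: every direct GDT write is now bracketed by pax_open_kernel()/pax_close_kernel(), because KERNEXEC keeps structures like the GDT read-only. A hypothetical, simplified sketch of what such a pair has to do (the real PaX helpers also handle paravirt and use their own internal names; kernel_writable_begin/end here are made-up illustrations): clear CR0.WP so supervisor-mode writes bypass read-only PTEs, with preemption off so the window stays on one cpu:

    #include <linux/preempt.h>
    #include <asm/processor-flags.h>        /* X86_CR0_WP */
    #include <asm/system.h>                 /* read_cr0()/write_cr0() on 2.6.35 */

    static unsigned long kernel_writable_begin(void)   /* hypothetical name */
    {
            unsigned long cr0;

            preempt_disable();
            cr0 = read_cr0();
            write_cr0(cr0 & ~X86_CR0_WP);   /* stop honoring read-only PTEs */
            return cr0;
    }

    static void kernel_writable_end(unsigned long cr0) /* hypothetical name */
    {
            write_cr0(cr0);                 /* restore write protection */
            preempt_enable();
    }
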
+diff -urNp linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c
+--- linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -115,6 +115,11 @@ void foo(void)
+ OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
++#endif
++
+ #endif
+
+ #ifdef CONFIG_XEN
+diff -urNp linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c
+--- linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c 2010-09-17 20:12:09.000000000 -0400
+@@ -63,6 +63,18 @@ int main(void)
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+ OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
++#endif
++
+ #endif
+
+
+@@ -115,6 +127,7 @@ int main(void)
+ ENTRY(cr8);
+ BLANK();
+ #undef ENTRY
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
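
Note on the asm-offsets hunks above: asm-offsets_*.c is never linked into the kernel. It is compiled to assembly and a Kbuild script scans the output for "->" markers to generate asm-offsets.h, which is what lets entry_64.S reference the new PV_CPU_write_cr0, PV_MMU_set_pgd and TSS_size constants. A sketch of the macros behind OFFSET()/DEFINE()/BLANK(), mirroring include/linux/kbuild.h:

    #include <stddef.h>     /* offsetof */

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    #define BLANK() asm volatile("\n->" : : )
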
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/common.c linux-2.6.35.4/arch/x86/kernel/cpu/common.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/common.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/common.c 2010-09-17 20:12:09.000000000 -0400
+@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
+
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * The code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -802,6 +748,10 @@ static void __cpuinit identify_cpu(struc
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -1117,7 +1067,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ oist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1143,7 +1093,7 @@ void __cpuinit cpu_init(void)
+ switch_to_new_gdt(cpu);
+ loadsegment(fs, 0);
+
+- load_idt((const struct desc_ptr *)&idt_descr);
++ load_idt(&idt_descr);
+
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+ syscall_init();
+@@ -1205,7 +1155,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
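
Note on the cpu_init() hunks above: the &per_cpu(init_tss, cpu) to init_tss + cpu rewrites assume the patch redefines init_tss elsewhere as a plain fixed array rather than a DEFINE_PER_CPU variable (so it can be placed in protected memory); plain pointer arithmetic then replaces the per_cpu() accessor. A reduced sketch of that assumed shape:

    #define NR_CPUS 8                       /* stand-in for the real config value */

    struct tss_struct { int dummy; };       /* stand-in */

    struct tss_struct init_tss[NR_CPUS];

    static struct tss_struct *tss_of(int cpu)
    {
            return init_tss + cpu;          /* was &per_cpu(init_tss, cpu) */
    }
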
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-09-17 20:12:09.000000000 -0400
+@@ -484,7 +484,7 @@ static const struct dmi_system_id sw_any
+ DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
+ },
+ },
+- { }
++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
+ };
+
+ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
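
Note on the DMI-table hunks above and in apm_32.c: rewriting "{ }" as a fully spelled-out sentinel does not change behavior. Both forms produce an all-zero terminator that dmi_check_system() stops on; the explicit form only keeps the hardened build free of missing-field-initializer warnings. A reduced sketch of the sentinel-terminated table pattern:

    #include <string.h>

    struct match { const char *ident; int flags; };

    static const struct match table[] = {
            { "X6DLP", 1 },
            { NULL, 0 },    /* explicit all-zero sentinel; "{ }" is equivalent */
    };

    static const struct match *lookup(const char *ident)
    {
            const struct match *m;

            for (m = table; m->ident != NULL; m++) /* walk until the sentinel */
                    if (strcmp(m->ident, ident) == 0)
                            return m;
            return NULL;
    }
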
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-09-17 20:12:09.000000000 -0400
+@@ -226,7 +226,7 @@ static struct cpu_model models[] =
+ { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
+
+- { NULL, }
++ { NULL, NULL, 0, NULL}
+ };
+ #undef _BANIAS
+ #undef BANIAS
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/intel.c linux-2.6.35.4/arch/x86/kernel/cpu/intel.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/intel.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/intel.c 2010-09-17 20:12:09.000000000 -0400
+@@ -160,7 +160,7 @@ static void __cpuinit trap_init_f00f_bug
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/Makefile linux-2.6.35.4/arch/x86/kernel/cpu/Makefile
+--- linux-2.6.35.4/arch/x86/kernel/cpu/Makefile 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/Makefile 2010-09-17 20:12:09.000000000 -0400
+@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
+ CFLAGS_REMOVE_perf_event.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o addon_cpuid_features.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o mshyperv.o
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c 2010-09-17 20:12:09.000000000 -0400
+@@ -219,7 +219,7 @@ static void print_mce(struct mce *m)
+ !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+ m->cs, m->ip);
+
+- if (m->cs == __KERNEL_CS)
++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+ print_symbol("{%s}", m->ip);
+ pr_cont("\n");
+ }
+@@ -1471,14 +1471,14 @@ void __cpuinit mcheck_cpu_init(struct cp
+ */
+
+ static DEFINE_SPINLOCK(mce_state_lock);
+-static int open_count; /* #times opened */
++static atomic_t open_count; /* #times opened */
+ static int open_exclu; /* already open exclusive? */
+
+ static int mce_open(struct inode *inode, struct file *file)
+ {
+ spin_lock(&mce_state_lock);
+
+- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
++ if (open_exclu || (atomic_read(&open_count) && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_state_lock);
+
+ return -EBUSY;
+@@ -1486,7 +1486,7 @@ static int mce_open(struct inode *inode,
+
+ if (file->f_flags & O_EXCL)
+ open_exclu = 1;
+- open_count++;
++ atomic_inc(&open_count);
+
+ spin_unlock(&mce_state_lock);
+
+@@ -1497,7 +1497,7 @@ static int mce_release(struct inode *ino
+ {
+ spin_lock(&mce_state_lock);
+
+- open_count--;
++ atomic_dec(&open_count);
+ open_exclu = 0;
+
+ spin_unlock(&mce_state_lock);
+@@ -1683,6 +1683,7 @@ static struct miscdevice mce_log_device
+ MISC_MCELOG_MINOR,
+ "mcelog",
+ &mce_chrdev_ops,
++ {NULL, NULL}, NULL, NULL
+ };
+
+ /*
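
Note on the mce.c hunks above: open_count becomes an atomic_t even though every writer already holds mce_state_lock; presumably this reflects the patchset's general preference for atomics on counters, so a lockless reader can never observe a torn or stale value. A minimal sketch of the resulting pattern:

    #include <asm/atomic.h>         /* linux/atomic.h on newer trees */

    static atomic_t open_count = ATOMIC_INIT(0);

    static int dev_open_excl_check(int want_excl)
    {
            /* mirrors mce_open(): refuse O_EXCL if anyone already holds it open */
            if (want_excl && atomic_read(&open_count))
                    return -1;
            atomic_inc(&open_count);
            return 0;
    }

    static void dev_release(void)
    {
            atomic_dec(&open_count);
    }
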
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c 2010-09-17 20:12:09.000000000 -0400
+@@ -28,7 +28,7 @@ static struct fixed_range_block fixed_ra
+ { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
+ { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
+ { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
+- {}
++ { 0, 0 }
+ };
+
+ static unsigned long smp_changes_mask;
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c 2010-09-17 20:12:09.000000000 -0400
+@@ -61,7 +61,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+
+-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+
+ const struct mtrr_ops *mtrr_if;
+
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h
+--- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-09-17 20:12:09.000000000 -0400
+@@ -12,19 +12,19 @@
+ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
+
+ struct mtrr_ops {
+- u32 vendor;
+- u32 use_intel_if;
+- void (*set)(unsigned int reg, unsigned long base,
++ const u32 vendor;
++ const u32 use_intel_if;
++ void (* const set)(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type);
+- void (*set_all)(void);
++ void (* const set_all)(void);
+
+- void (*get)(unsigned int reg, unsigned long *base,
++ void (* const get)(unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type);
+- int (*get_free_region)(unsigned long base, unsigned long size,
++ int (* const get_free_region)(unsigned long base, unsigned long size,
+ int replace_reg);
+- int (*validate_add_page)(unsigned long base, unsigned long size,
++ int (* const validate_add_page)(unsigned long base, unsigned long size,
+ unsigned int type);
+- int (*have_wrcomb)(void);
++ int (* const have_wrcomb)(void);
+ };
+
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
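
Note on the mtrr.h hunk above: making each function pointer in struct mtrr_ops const means nothing can retarget an individual hook after initialization, and instances declared "static const" land in .rodata, closing off a classic kernel-write-primitive target. The comments added to perfctr-watchdog.c below mark the one ops structure that must stay writable. A reduced sketch of the pattern:

    struct ops {
            int  (* const probe)(void);
            void (* const stop)(void);
    };

    static int  my_probe(void) { return 0; }
    static void my_stop(void)  { }

    static const struct ops my_ops = {      /* emitted into .rodata */
            .probe  = my_probe,
            .stop   = my_stop,
    };

    /* my_ops.probe = NULL; would now be rejected at compile time */
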
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-09-17 20:12:09.000000000 -0400
+@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
+
+ /* Interface defining a CPU specific perfctr watchdog */
+ struct wd_ops {
+- int (*reserve)(void);
+- void (*unreserve)(void);
+- int (*setup)(unsigned nmi_hz);
+- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
+- void (*stop)(void);
++ int (* const reserve)(void);
++ void (* const unreserve)(void);
++ int (* const setup)(unsigned nmi_hz);
++ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
++ void (* const stop)(void);
+ unsigned perfctr;
+ unsigned evntsel;
+ u64 checkbit;
+@@ -634,6 +634,7 @@ static const struct wd_ops p4_wd_ops = {
+ #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+ #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
++/* cannot be const, see probe_nmi_watchdog */
+ static struct wd_ops intel_arch_wd_ops;
+
+ static int setup_intel_arch_watchdog(unsigned nmi_hz)
+@@ -686,6 +687,7 @@ static int setup_intel_arch_watchdog(uns
+ return 1;
+ }
+
++/* cannot be const */
+ static struct wd_ops intel_arch_wd_ops __read_mostly = {
+ .reserve = single_msr_reserve,
+ .unreserve = single_msr_unreserve,
+diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c
+--- linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c 2010-09-17 20:12:09.000000000 -0400
+@@ -1685,7 +1685,7 @@ perf_callchain_user(struct pt_regs *regs
+ break;
+
+ callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (__force const void __user *)frame.next_frame;
+ }
+ }
+
+diff -urNp linux-2.6.35.4/arch/x86/kernel/crash.c linux-2.6.35.4/arch/x86/kernel/crash.c
+--- linux-2.6.35.4/arch/x86/kernel/crash.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/crash.c 2010-09-17 20:12:09.000000000 -0400
+@@ -40,7 +40,7 @@ static void kdump_nmi_callback(int cpu,
+ regs = args->regs;
+
+ #ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff -urNp linux-2.6.35.4/arch/x86/kernel/doublefault_32.c linux-2.6.35.4/arch/x86/kernel/doublefault_32.c
+--- linux-2.6.35.4/arch/x86/kernel/doublefault_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/doublefault_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff -urNp linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c
+--- linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -107,11 +107,12 @@ void show_registers(struct pt_regs *regs
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault.
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp,
+@@ -119,10 +120,10 @@ void show_registers(struct pt_regs *regs
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -131,7 +132,7 @@ void show_registers(struct pt_regs *regs
+ printk(" Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+@@ -144,6 +145,7 @@ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
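
Note on the dumpstack_32.c hunks above: is_valid_bugaddr() inspects an instruction pointer that may be garbage, so it reads through probe_kernel_address(), which goes via the exception-table machinery and returns nonzero instead of oopsing when the address is unmapped. The added ktla_ktva() translates from the KERNEXEC kernel-text alias back to the address the data mapping uses (a PaX-specific helper). A short sketch of the safe ud2 probe, under those assumptions:

    #include <linux/uaccess.h>

    static int has_ud2_at(unsigned long ip)
    {
            unsigned short insn;

            /* a faulting read comes back as an error code, not an oops */
            if (probe_kernel_address((unsigned short *)ip, insn))
                    return 0;
            return insn == 0x0b0f;  /* ud2 is 0x0f 0x0b, read little-endian */
    }
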
+diff -urNp linux-2.6.35.4/arch/x86/kernel/dumpstack.c linux-2.6.35.4/arch/x86/kernel/dumpstack.c
+--- linux-2.6.35.4/arch/x86/kernel/dumpstack.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/dumpstack.c 2010-09-17 20:12:09.000000000 -0400
+@@ -207,7 +207,7 @@ void dump_stack(void)
+ #endif
+
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -263,7 +263,7 @@ void __kprobes oops_end(unsigned long fl
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -290,7 +290,7 @@ int __kprobes __die(const char *str, str
+
+ show_registers(regs);
+ #ifdef CONFIG_X86_32
+- if (user_mode_vm(regs)) {
++ if (user_mode(regs)) {
+ sp = regs->sp;
+ ss = regs->ss & 0xffff;
+ } else {
+@@ -318,7 +318,7 @@ void die(const char *str, struct pt_regs
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff -urNp linux-2.6.35.4/arch/x86/kernel/efi_32.c linux-2.6.35.4/arch/x86/kernel/efi_32.c
+--- linux-2.6.35.4/arch/x86/kernel/efi_32.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/efi_32.c 2010-09-17 20:12:09.000000000 -0400
+@@ -38,70 +38,38 @@
+ */
+
+ static unsigned long efi_rt_eflags;
+-static pgd_t efi_bak_pg_dir_pointer[2];
++static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
+
+-void efi_call_phys_prelog(void)
++void __init efi_call_phys_prelog(void)
+ {
+- unsigned long cr4;
+- unsigned long temp;
+ struct desc_ptr gdt_descr;
+
+ local_irq_save(efi_rt_eflags);
+
+- /*
+- * If I don't have PAE, I should just duplicate two entries in page
+- * directory. If I have PAE, I just need to duplicate one entry in
+- * page directory.
+- */
+- cr4 = read_cr4_safe();
+
+- if (cr4 & X86_CR4_PAE) {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- swapper_pg_dir[0].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- } else {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- efi_bak_pg_dir_pointer[1].pgd =
+- swapper_pg_dir[pgd_index(0x400000)].pgd;
+- swapper_pg_dir[pgd_index(0)].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- temp = PAGE_OFFSET + 0x400000;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- swapper_pg_dir[pgd_index(temp)].pgd;
+- }
++ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+ __flush_tlb_all();
+
+- gdt_descr.address = __pa(get_cpu_gdt_table(0));
++ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ }
+
+-void efi_call_phys_epilog(void)
++void __init efi_call_phys_epilog(void)
+ {
+- unsigned long cr4;
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
++ gdt_descr.address = get_cpu_gdt_table(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+
+- cr4 = read_cr4_safe();
+-
+- if (cr4 & X86_CR4_PAE) {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- } else {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- efi_bak_pg_dir_pointer[1].pgd;
+- }
++ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
+
+ /*
+ * After the lock is released, the original page table is restored.
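
Note on the efi_32.c rewrite above: the PAE and non-PAE special cases disappear because, instead of shuffling one or two pgd slots by hand, the whole kernel pgd range is saved with clone_pgd_range() and the kernel mapping is copied down over the identity range, then restored wholesale afterwards. clone_pgd_range() itself is just a typed memcpy; a sketch mirroring its arch/x86 pgtable.h definition, with a stand-in pgd_t:

    #include <string.h>

    typedef struct { unsigned long pgd; } pgd_t;    /* stand-in */

    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
            memcpy(dst, src, count * sizeof(pgd_t));
    }
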
+diff -urNp linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S
+--- linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S 2010-09-17 20:12:09.000000000 -0400
+@@ -6,6 +6,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
+
+ /*
+@@ -20,7 +21,7 @@
+ * service functions will comply with gcc calling convention, too.
+ */
+
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+ /*
+ * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
+ * The mapping of lower virtual memory has been created in prelog and
+ * epilog.
+ */
+- movl $1f, %edx
+- subl $__PAGE_OFFSET, %edx
+- jmp *%edx
++ jmp 1f-__PAGE_OFFSET
+ 1:
+
+ /*
+@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %edx
+- movl %edx, saved_return_addr
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr
+- movl $2f, %edx
+- subl $__PAGE_OFFSET, %edx
+- pushl %edx
++ popl (saved_return_addr)
++ popl (efi_rt_function_ptr)
+
+ /*
+ * 3. Clear PG bit in %CR0.
+@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
+ /*
+ * 5. Call the physical function.
+ */
+- jmp *%ecx
++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+
+-2:
+ /*
+ * 6. After EFI runtime service returns, control will return to
+ * following instruction. We'd better readjust the stack pointer first.
+@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+- jmp 1f
+-1:
++
+ /*
+ * 8. Now restore the virtual mode from flat mode by
+ * adding EIP with PAGE_OFFSET.
+ */
+- movl $1f, %edx
+- jmp *%edx
++ jmp 1f+__PAGE_OFFSET
+ 1:
+
+ /*
+ * 9. Balance the stack. And because EAX contains the return value,
+ * we'd better not clobber it.
+ */
+- leal efi_rt_function_ptr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
++ pushl (efi_rt_function_ptr)
+
+ /*
+- * 10. Push the saved return address onto the stack and return.
++ * 10. Return to the saved return address.
+ */
+- leal saved_return_addr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+
+-.data
++__INITDATA
+ saved_return_addr:
+ .long 0
+ efi_rt_function_ptr:
+diff -urNp linux-2.6.35.4/arch/x86/kernel/entry_32.S linux-2.6.35.4/arch/x86/kernel/entry_32.S
+--- linux-2.6.35.4/arch/x86/kernel/entry_32.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/entry_32.S 2010-09-17 20:12:09.000000000 -0400
+@@ -192,7 +192,67 @@
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro PAX_EXIT_KERNEL
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_PARAVIRT
++ push %eax; push %ecx;
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $16, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ pop %ecx; pop %eax
++#endif
++#endif
++.endm
++
++.macro PAX_ENTER_KERNEL
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_PARAVIRT
++ push %eax; push %ecx;
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $16, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ pop %ecx; pop %eax
++#endif
++#endif
++.endm
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl %fs
+@@ -225,7 +285,7 @@
+ pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -233,6 +293,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ PAX_ENTER_KERNEL
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl %ebx
+ CFI_ADJUST_CFA_OFFSET -4
+@@ -357,7 +426,15 @@ check_userspace:
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ PAX_EXIT_KERNEL
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -423,10 +500,9 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ GET_THREAD_INFO(%ebp)
++ pushl TI_sysenter_return(%ebp)
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eip, 0
+
+@@ -439,9 +515,19 @@ sysenter_past_esp:
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -464,12 +550,23 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call pax_randomize_kstack
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++#endif
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -513,11 +610,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -551,6 +654,10 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -615,7 +722,13 @@ ldt_ss:
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
+- PER_CPU(gdt_page, %ebx)
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+ mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
+ mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
+@@ -655,25 +768,19 @@ work_resched:
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl %ecx # save ti_flags for do_notify_resume
+ CFI_ADJUST_CFA_OFFSET 4
+ call save_v86_state # %eax contains pt_regs pointer
+ popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ xorl %edx, %edx
+ call do_notify_resume
+@@ -708,6 +815,10 @@ END(syscall_exit_work)
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+@@ -791,7 +902,13 @@ ptregs_clone:
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
+- PER_CPU(gdt_page, %ebx)
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
+ shl $16, %eax
+@@ -1273,7 +1390,6 @@ return_to_handler:
+ jmp *%ecx
+ #endif
+
+-.section .rodata,"a"
+ #include "syscall_table_32.S"
+
+ syscall_table_size=(.-sys_call_table)
+@@ -1330,9 +1446,12 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ PAX_ENTER_KERNEL
++
+ TRACE_IRQS_OFF
+ movl %esp,%eax # pt_regs pointer
+ call *%edi
+@@ -1426,6 +1545,9 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++ PAX_EXIT_KERNEL
++
+ jmp restore_all_notrace
+ CFI_ENDPROC
+
+@@ -1466,6 +1588,9 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++ PAX_EXIT_KERNEL
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+diff -urNp linux-2.6.35.4/arch/x86/kernel/entry_64.S linux-2.6.35.4/arch/x86/kernel/entry_64.S
+--- linux-2.6.35.4/arch/x86/kernel/entry_64.S 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/entry_64.S 2010-09-17 20:12:09.000000000 -0400
+@@ -53,6 +53,7 @@
+ #include <asm/paravirt.h>
+ #include <asm/ftrace.h>
+ #include <asm/percpu.h>
++#include <asm/pgtable.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -174,6 +175,189 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ push $\sel
++ push $\off
++ lretq
++#endif
++ .endm
++
++ENTRY(pax_enter_kernel)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ push %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ jnc 1f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jz 3f
++ ljmpq __KERNEL_CS,3f
++1: ljmpq __KERNEXEC_KERNEL_CS,2f
++2: SET_RDI_INTO_CR0
++3:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ pop %rdi
++#endif
++
++ retq
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ push %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jnz 2f
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ ljmpq __KERNEL_CS,1f
++1: SET_RDI_INTO_CR0
++2:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ pop %rdi
++#endif
++
++ retq
++ENDPROC(pax_exit_kernel)
++
++ENTRY(pax_enter_kernel_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %rdi
++ push %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ push %rdi
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: pop %rdi
++#endif
++ SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ pop %rbx
++ pop %rdi
++#endif
++
++ retq
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %rdi
++
++#ifdef CONFIG_PARAVIRT
++ push %rbx
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++ GET_CR3_INTO_RDI
++ add $__START_KERNEL_map,%rdi
++ sub phys_base(%rip),%rdi
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ mov %rdi,%rbx
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rdi)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: PV_RESTORE_REGS(CLBR_RDI)
++ pop %rbx
++#endif
++
++ pop %rdi
++#endif
++
++ retq
++ENDPROC(pax_exit_kernel_user)
+
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -317,7 +501,7 @@ ENTRY(save_args)
+ leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
+ movq_cfi rbp, 8 /* push %rbp */
+ leaq 8(%rsp), %rbp /* mov %rsp, %rbp */
+- testl $3, CS(%rdi)
++ testb $3, CS(%rdi)
+ je 1f
+ SWAPGS
+ /*
+@@ -409,7 +593,7 @@ ENTRY(ret_from_fork)
+
+ RESTORE_REST
+
+- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ je int_ret_from_sys_call
+
+ testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+@@ -468,6 +652,11 @@ ENTRY(system_call_after_swapgs)
+
+ movq %rsp,PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++
+ /*
+ * No need to follow this irqs off/on section - it's straight
+ * and short:
+@@ -502,6 +691,11 @@ sysret_check:
+ andl %edi,%edx
+ jnz sysret_careful
+ CFI_REMEMBER_STATE
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++
+ /*
+ * sysretq will re-enable interrupts:
+ */
+@@ -613,7 +807,7 @@ tracesys:
+ GLOBAL(int_ret_from_sys_call)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_restore_args
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: mask to check */
+@@ -800,6 +994,16 @@ END(interrupt)
+ CFI_ADJUST_CFA_OFFSET 10*8
+ call save_args
+ PARTIAL_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ call pax_enter_kernel
++ jmp 2f
++1: call pax_enter_kernel_user
++2:
++#else
++ call pax_enter_kernel
++#endif
+ call \func
+ .endm
+
+@@ -826,7 +1030,7 @@ ret_from_intr:
+ CFI_ADJUST_CFA_OFFSET -8
+ exit_intr:
+ GET_THREAD_INFO(%rcx)
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_kernel
+
+ /* Interrupt came from user space */
+@@ -848,12 +1052,18 @@ retint_swapgs: /* return to user-space
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++
+ TRACE_IRQS_IRETQ
+ SWAPGS
+ jmp restore_args
+
+ retint_restore_args: /* return to kernel space */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ call pax_exit_kernel
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -1040,6 +1250,16 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET 15*8
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ call pax_enter_kernel
++ jmp 2f
++1: call pax_enter_kernel_user
++2:
++#else
++ call pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+@@ -1057,6 +1277,16 @@ ENTRY(\sym)