Diffstat (limited to '2.6.32')
-rw-r--r--  2.6.32/0000_README                                          2
-rw-r--r--  2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch  1257
            (renamed from 2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch)
-rw-r--r--  2.6.32/4425_grsec-pax-without-grsec.patch                   8
-rw-r--r--  2.6.32/4430_grsec-kconfig-default-gids.patch               18
-rw-r--r--  2.6.32/4440_selinux-avc_audit-log-curr_ip.patch             2
5 files changed, 955 insertions(+), 332 deletions(-)
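Nearly every hunk in this revision makes the same substitution: the open-coded hole test (!vma || addr + len <= vma->vm_start) in each architecture's arch_get_unmapped_area() family becomes a call to a shared check_heap_stack_gap() helper, declared together with a new sysctl_heap_stack_gap tunable in the include/linux/sched.h hunk far below. The helper's definition is not part of this diff; the following is a minimal sketch of what it plausibly does, with only the signature taken from the sched.h hunk and the gap width and VM_GROWSDOWN handling as assumptions:

#include <linux/mm.h>
#include <linux/sched.h>

/* Assumed default; the real patch exposes this as a sysctl. */
unsigned long sysctl_heap_stack_gap __read_mostly = 64 * 1024;

bool check_heap_stack_gap(struct vm_area_struct *vma,
			  unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)				/* no VMA above: hole fits */
		return true;
	if (addr + len > vma->vm_start)		/* overlaps the next VMA */
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a gap below stacks */
		gap = sysctl_heap_stack_gap;
	return vma->vm_start - (addr + len) >= gap;
}

The net effect, repeated across alpha, arm, frv, ia64, mips, parisc, powerpc, sh, sparc and x86 below, is that a new mapping may no longer be placed flush against the bottom of a stack VMA.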
diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 495f8be..e980fa6 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -3,7 +3,7 @@ README
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch
+Patch: 4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch b/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch
index 4ed5e67..653c257 100644
--- a/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch
+++ b/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch
@@ -50,7 +50,16 @@ diff -urNp linux-2.6.32.21/arch/alpha/kernel/module.c linux-2.6.32.21/arch/alpha
for (i = 0; i < n; i++) {
diff -urNp linux-2.6.32.21/arch/alpha/kernel/osf_sys.c linux-2.6.32.21/arch/alpha/kernel/osf_sys.c
--- linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-09-17 18:34:04.000000000 -0400
+@@ -1169,7 +1169,7 @@ arch_get_unmapped_area_1(unsigned long a
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
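This alpha hunk is the template for most architectures that follow: only the hole test inside the bottom-up scan changes. Restated stand-alone with the sketched helper above (illustrative, not a verbatim extract):

for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
	/* Invariant here: !vma || addr < vma->vm_end. */
	if (limit - len < addr)
		return -ENOMEM;			/* scanned past the limit */
	if (check_heap_stack_gap(vma, addr, len))
		return addr;			/* hole (plus gap) fits */
	addr = vma->vm_end;			/* skip past this VMA */
}

Note that the helper must treat vma == NULL as success, or the vma = vma->vm_next step would dereference a null pointer; that is the role the old !vma short-circuit played.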
@@ -1205,6 +1205,10 @@ arch_get_unmapped_area(struct file *filp
merely specific addresses, but regions of memory -- perhaps
this feature should be incorporated into all ports? */
@@ -446,7 +455,7 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/fault.c linux-2.6.32.21/arch/arm/mm/fault
*
diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
--- linux-2.6.32.21/arch/arm/mm/mmap.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/arm/mm/mmap.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/arm/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400
@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
if (len > TASK_SIZE)
return -ENOMEM;
@@ -458,7 +467,13 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -75,10 +79,10 @@ arch_get_unmapped_area(struct file *filp
+@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
return addr;
}
if (len > mm->cached_hole_size) {
@@ -472,7 +487,7 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
}
full_search:
-@@ -94,8 +98,8 @@ full_search:
+@@ -94,14 +97,14 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -483,6 +498,13 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c
mm->cached_hole_size = 0;
goto full_search;
}
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
diff -urNp linux-2.6.32.21/arch/arm/plat-s3c/pm.c linux-2.6.32.21/arch/arm/plat-s3c/pm.c
--- linux-2.6.32.21/arch/arm/plat-s3c/pm.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/arm/plat-s3c/pm.c 2010-09-04 15:54:51.000000000 -0400
@@ -618,6 +640,37 @@ diff -urNp linux-2.6.32.21/arch/frv/include/asm/kmap_types.h linux-2.6.32.21/arc
KM_TYPE_NR
};
+diff -urNp linux-2.6.32.21/arch/frv/mm/elf-fdpic.c linux-2.6.32.21/arch/frv/mm/elf-fdpic.c
+--- linux-2.6.32.21/arch/frv/mm/elf-fdpic.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/frv/mm/elf-fdpic.c 2010-09-17 18:34:04.000000000 -0400
+@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ goto success;
+ }
+
+@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ goto success;
+ addr = vma->vm_end;
+ }
diff -urNp linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c
--- linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c 2010-09-04 15:54:51.000000000 -0400
@@ -1023,7 +1076,7 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.21/arch/i
.map_page = swiotlb_map_page,
diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c
--- linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-09-17 18:34:04.000000000 -0400
@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
if (REGION_NUMBER(addr) == RGN_HPAGE)
addr = 0;
@@ -1038,7 +1091,7 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64
if (!addr)
addr = mm->free_area_cache;
-@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil
+@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
@@ -1050,6 +1103,12 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64
goto full_search;
}
return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
diff -urNp linux-2.6.32.21/arch/ia64/kernel/topology.c linux-2.6.32.21/arch/ia64/kernel/topology.c
--- linux-2.6.32.21/arch/ia64/kernel/topology.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/ia64/kernel/topology.c 2010-09-04 15:54:51.000000000 -0400
@@ -1126,6 +1185,18 @@ diff -urNp linux-2.6.32.21/arch/ia64/mm/fault.c linux-2.6.32.21/arch/ia64/mm/fau
survive:
/*
* If for any reason at all we couldn't handle the fault, make
+diff -urNp linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c
+--- linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400
+@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
diff -urNp linux-2.6.32.21/arch/ia64/mm/init.c linux-2.6.32.21/arch/ia64/mm/init.c
--- linux-2.6.32.21/arch/ia64/mm/init.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/ia64/mm/init.c 2010-09-04 15:54:51.000000000 -0400
@@ -1312,8 +1383,8 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/process.c linux-2.6.32.21/arch/mips/
-}
diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/kernel/syscall.c
--- linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-09-04 15:54:51.000000000 -0400
-@@ -102,6 +102,11 @@ unsigned long arch_get_unmapped_area(str
++++ linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-09-17 18:34:04.000000000 -0400
+@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
@@ -1325,8 +1396,12 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -112,7 +117,7 @@ unsigned long arch_get_unmapped_area(str
- (!vmm || addr + len <= vmm->vm_start))
+ else
+ addr = PAGE_ALIGN(addr);
+ vmm = find_vma(current->mm, addr);
+- if (task_size - len >= addr &&
+- (!vmm || addr + len <= vmm->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
return addr;
}
- addr = TASK_UNMAPPED_BASE;
@@ -1334,6 +1409,15 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
+@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (task_size - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (do_color_align)
diff -urNp linux-2.6.32.21/arch/mips/mm/fault.c linux-2.6.32.21/arch/mips/mm/fault.c
--- linux-2.6.32.21/arch/mips/mm/fault.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/mips/mm/fault.c 2010-09-04 15:54:51.000000000 -0400
@@ -1516,7 +1600,25 @@ diff -urNp linux-2.6.32.21/arch/parisc/kernel/module.c linux-2.6.32.21/arch/pari
me->arch.unwind_section, table, end, gp);
diff -urNp linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c
--- linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-09-17 18:34:04.000000000 -0400
+@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
if (flags & MAP_FIXED)
return addr;
@@ -2671,8 +2773,38 @@ diff -urNp linux-2.6.32.21/arch/powerpc/mm/mmap_64.c linux-2.6.32.21/arch/powerp
}
diff -urNp linux-2.6.32.21/arch/powerpc/mm/slice.c linux-2.6.32.21/arch/powerpc/mm/slice.c
--- linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-09-04 15:54:51.000000000 -0400
-@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un
++++ linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-09-17 18:34:04.000000000 -0400
+@@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len);
+ }
+
+-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+ {
+ return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
+ 1ul << SLICE_LOW_SHIFT);
+@@ -256,7 +255,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -336,7 +335,7 @@ static unsigned long slice_find_area_top
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un
if (fixed && addr > (mm->task_size - len))
return -EINVAL;
@@ -3115,6 +3247,56 @@ diff -urNp linux-2.6.32.21/arch/sh/kernel/kgdb.c linux-2.6.32.21/arch/sh/kernel/
/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
.gdb_bpt_instr = { 0x3c, 0xc3 },
+diff -urNp linux-2.6.32.21/arch/sh/mm/mmap.c linux-2.6.32.21/arch/sh/mm/mmap.c
+--- linux-2.6.32.21/arch/sh/mm/mmap.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/sh/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400
+@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -106,7 +105,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
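One subtlety in the top-down hunks above: the candidate region ends at addr, so the probe is find_vma(mm, addr - len) and the gap check must receive addr - len as the region start. That is why these call sites read check_heap_stack_gap(vma, addr - len, len) instead of passing addr. Compressed restatement, same assumed helper:

/* Top-down: try to place [addr - len, addr) just below the cached hint. */
if (addr > len) {
	struct vm_area_struct *vma = find_vma(mm, addr - len);

	if (check_heap_stack_gap(vma, addr - len, len))
		/* remember the address as a hint for next time */
		return mm->free_area_cache = addr - len;
}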
diff -urNp linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h
--- linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h 2010-08-29 21:08:20.000000000 -0400
+++ linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h 2010-09-15 02:34:10.000000000 -0400
@@ -3669,7 +3851,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.21/arch/sp
.map_page = dma_4v_map_page,
diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c
--- linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 18:34:04.000000000 -0400
@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
if (ARCH_SUN4C && len > 0x20000000)
return -ENOMEM;
@@ -3679,9 +3861,18 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.21/arch
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr);
+@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c
--- linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 18:34:04.000000000 -0400
@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -3702,7 +3893,14 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
-@@ -153,9 +157,9 @@ unsigned long arch_get_unmapped_area(str
+@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
}
if (len > mm->cached_hole_size) {
@@ -3714,7 +3912,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
mm->cached_hole_size = 0;
}
-@@ -175,8 +179,8 @@ full_search:
+@@ -175,14 +178,14 @@ full_search:
vma = find_vma(mm, VA_EXCLUDE_END);
}
if (unlikely(task_size < addr)) {
@@ -3725,7 +3923,14 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -216,7 +220,7 @@ arch_get_unmapped_area_topdown(struct fi
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
@@ -3734,7 +3939,35 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
-@@ -384,6 +388,12 @@ void arch_pick_mmap_layout(struct mm_str
+@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -279,7 +281,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -384,6 +386,12 @@ void arch_pick_mmap_layout(struct mm_str
current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
sysctl_legacy_va_layout) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -3747,7 +3980,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
-@@ -398,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
+@@ -398,6 +406,12 @@ void arch_pick_mmap_layout(struct mm_str
gap = (task_size / 6 * 5);
mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
@@ -4033,8 +4266,8 @@ diff -urNp linux-2.6.32.21/arch/sparc/lib/atomic_64.S linux-2.6.32.21/arch/sparc
bne,pn %xcc, 2f
diff -urNp linux-2.6.32.21/arch/sparc/lib/ksyms.c linux-2.6.32.21/arch/sparc/lib/ksyms.c
--- linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-09-04 15:54:51.000000000 -0400
-@@ -144,12 +144,15 @@ EXPORT_SYMBOL(__downgrade_write);
++++ linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-09-17 17:45:39.000000000 -0400
+@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
/* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add);
@@ -4044,7 +4277,9 @@ diff -urNp linux-2.6.32.21/arch/sparc/lib/ksyms.c linux-2.6.32.21/arch/sparc/lib
+EXPORT_SYMBOL(atomic_sub_unchecked);
EXPORT_SYMBOL(atomic_sub_ret);
EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_unchecked);
EXPORT_SYMBOL(atomic64_sub_ret);
@@ -4969,6 +5204,46 @@ diff -urNp linux-2.6.32.21/arch/sparc/mm/fault_64.c linux-2.6.32.21/arch/sparc/m
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
+diff -urNp linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c
+--- linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400
++++ linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400
+@@ -69,7 +69,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr - len, len)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -126,7 +126,7 @@ hugetlb_get_unmapped_area_topdown(struct
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, addr, len))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -183,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
diff -urNp linux-2.6.32.21/arch/sparc/mm/init_32.c linux-2.6.32.21/arch/sparc/mm/init_32.c
--- linux-2.6.32.21/arch/sparc/mm/init_32.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/sparc/mm/init_32.c 2010-09-04 15:54:51.000000000 -0400
@@ -9393,7 +9668,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess_64.h linux-2.6.32.21/arc
#endif /* _ASM_X86_UACCESS_64_H */
diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x86/include/asm/uaccess.h
--- linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-09-16 23:14:31.000000000 -0400
@@ -8,12 +8,15 @@
#include <linux/thread_info.h>
#include <linux/prefetch.h>
@@ -9458,22 +9733,9 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
/*
* The exception table consists of pairs of addresses: the first is the
-@@ -179,17 +213,34 @@ extern int __get_user_bad(void);
- __ret_gu; \
- })
-
-+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define __put_user_x(size, x, ptr, __ret_pu) \
-+ ({ \
-+ int __dummy; \
-+ asm volatile("call __put_user_" #size : "=a" (__ret_pu), "=c" (__dummy) \
-+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx"); \
-+ })
-+#else
- #define __put_user_x(size, x, ptr, __ret_pu) \
+@@ -183,13 +217,21 @@ extern int __get_user_bad(void);
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
-+#endif
-
+#ifdef CONFIG_X86_32
@@ -9496,7 +9758,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
-@@ -197,15 +248,18 @@ extern int __get_user_bad(void);
+@@ -197,15 +239,18 @@ extern int __get_user_bad(void);
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (err) \
@@ -9519,7 +9781,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
-@@ -374,16 +428,18 @@ do { \
+@@ -374,16 +419,18 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -9541,7 +9803,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
#define __get_user_size_ex(x, ptr, size) \
do { \
-@@ -407,10 +463,12 @@ do { \
+@@ -407,10 +454,12 @@ do { \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -9556,7 +9818,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
#define __put_user_nocheck(x, ptr, size) \
({ \
-@@ -424,13 +482,24 @@ do { \
+@@ -424,13 +473,24 @@ do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
@@ -9583,7 +9845,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
/*
* Tell gcc we read from memory instead of writing: this is because
-@@ -438,21 +507,26 @@ struct __large_struct { unsigned long bu
+@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -9614,7 +9876,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
/*
* uaccess_try and catch
-@@ -530,7 +604,7 @@ struct __large_struct { unsigned long bu
+@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
@@ -9623,7 +9885,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x
} while (0)
#ifdef CONFIG_X86_WP_WORKS_OK
-@@ -567,6 +641,7 @@ extern struct movsl_mask {
+@@ -567,6 +632,7 @@ extern struct movsl_mask {
#define ARCH_HAS_NOCACHE_UACCESS 1
@@ -13721,7 +13983,26 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/signal.c linux-2.6.32.21/arch/x86/ker
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
diff -urNp linux-2.6.32.21/arch/x86/kernel/smpboot.c linux-2.6.32.21/arch/x86/kernel/smpboot.c
--- linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-09-17 17:44:35.000000000 -0400
+@@ -95,14 +95,14 @@ static DEFINE_PER_CPU(struct task_struct
+ */
+ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
+
+-void cpu_hotplug_driver_lock()
++void cpu_hotplug_driver_lock(void)
+ {
+- mutex_lock(&x86_cpu_hotplug_driver_mutex);
++ mutex_lock(&x86_cpu_hotplug_driver_mutex);
+ }
+
+-void cpu_hotplug_driver_unlock()
++void cpu_hotplug_driver_unlock(void)
+ {
+- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
++ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
+ }
+
+ ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
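Besides re-indenting the mutex calls, the hunk above fixes a genuine C pitfall: unlike in C++, an empty parameter list in C declares an unprototyped function, so cpu_hotplug_driver_lock() would silently accept any arguments, while (void) declares it as taking none. A hypothetical illustration (both declarations and the caller are invented):

void lock_unprototyped();	/* unspecified arguments: calls unchecked */
void lock_prototyped(void);	/* exactly zero arguments: calls checked */

static void demo(void)
{
	lock_unprototyped(42);	/* accepted silently by a C89/C99 compiler */
	lock_prototyped();	/* lock_prototyped(42) would be rejected */
}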
@@ -748,7 +748,11 @@ do_rest:
(unsigned long)task_stack_page(c_idle.idle) -
KERNEL_STACK_OFFSET + THREAD_SIZE;
@@ -13792,7 +14073,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/syscall_table_32.S linux-2.6.32.21/ar
.long sys_exit
diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c
--- linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-09-17 18:34:04.000000000 -0400
@@ -24,6 +24,21 @@
#include <asm/syscalls.h>
@@ -13815,7 +14096,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/i386 didn't use to be able to handle more than
-@@ -58,6 +73,205 @@ out:
+@@ -58,6 +73,208 @@ out:
return err;
}
@@ -13844,10 +14125,11 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
-+ vma = find_vma(mm, addr);
-+ if (pax_task_size - len >= addr &&
-+ (!vma || addr + len <= vma->vm_start))
-+ return addr;
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
@@ -13887,13 +14169,8 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ }
+ return -ENOMEM;
+ }
-+ if (!vma || addr + len <= vma->vm_start) {
-+ /*
-+ * Remember the place where we stopped the search:
-+ */
-+ mm->free_area_cache = addr + len;
-+ return addr;
-+ }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
@@ -13903,6 +14180,12 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ goto full_search;
+ }
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+}
+
+unsigned long
@@ -13938,10 +14221,11 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
-+ vma = find_vma(mm, addr);
-+ if (pax_task_size - len >= addr &&
-+ (!vma || addr + len <= vma->vm_start))
-+ return addr;
++ if (pax_task_size - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
@@ -13956,7 +14240,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
-+ if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
@@ -13973,7 +14257,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
-+ if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
@@ -14021,7 +14305,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
struct sel_arg_struct {
unsigned long n;
-@@ -93,7 +307,7 @@ asmlinkage int sys_ipc(uint call, int fi
+@@ -93,7 +310,7 @@ asmlinkage int sys_ipc(uint call, int fi
return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
case SEMTIMEDOP:
return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
@@ -14030,7 +14314,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
case SEMGET:
return sys_semget(first, second, third);
-@@ -140,7 +354,7 @@ asmlinkage int sys_ipc(uint call, int fi
+@@ -140,7 +357,7 @@ asmlinkage int sys_ipc(uint call, int fi
ret = do_shmat(first, (char __user *) ptr, second, &raddr);
if (ret)
return ret;
@@ -14041,7 +14325,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8
if (!segment_eq(get_fs(), get_ds()))
diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c
--- linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-09-17 18:34:04.000000000 -0400
@@ -32,8 +32,8 @@ out:
return error;
}
@@ -14062,7 +14346,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
*end = TASK_SIZE;
}
}
-@@ -69,11 +69,15 @@ arch_get_unmapped_area(struct file *filp
+@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
if (flags & MAP_FIXED)
return addr;
@@ -14079,7 +14363,22 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
-@@ -128,7 +132,7 @@ arch_get_unmapped_area_topdown(struct fi
+- if (end - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+@@ -106,7 +109,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -14088,7 +14387,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -141,6 +145,10 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi
if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
goto bottomup;
@@ -14099,7 +14398,32 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
-@@ -198,13 +206,21 @@ bottomup:
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
+ return addr;
+ }
+
+@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr-len;
+ }
+@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+
+@@ -198,13 +204,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -14599,22 +14923,13 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmi_32.c linux-2.6.32.21/arch/x86/ker
local_irq_save(flags);
diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S
--- linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-09-04 15:54:51.000000000 -0400
-@@ -26,6 +26,22 @@
++++ linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-09-17 17:29:28.000000000 -0400
+@@ -26,6 +26,13 @@
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>
+#include <asm/segment.h>
+
-+#undef PMD_SIZE
-+#undef PMD_SHIFT
-+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-+#define PMD_SHIFT 21
-+#else
-+#define PMD_SHIFT 22
-+#endif
-+#define PMD_SIZE (1 << PMD_SHIFT)
-+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
+#else
@@ -14623,7 +14938,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
#undef i386 /* in case the preprocessor is a 32bit one */
-@@ -34,40 +50,55 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
+@@ -34,40 +41,55 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
@@ -14689,7 +15004,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
HEAD_TEXT
#ifdef CONFIG_X86_32
. = ALIGN(PAGE_SIZE);
-@@ -82,28 +113,69 @@ SECTIONS
+@@ -82,28 +104,69 @@ SECTIONS
IRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
@@ -14766,7 +15081,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
PAGE_ALIGNED_DATA(PAGE_SIZE)
-@@ -166,12 +238,6 @@ SECTIONS
+@@ -166,12 +229,6 @@ SECTIONS
}
vgetcpu_mode = VVIRT(.vgetcpu_mode);
@@ -14779,7 +15094,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
*(.vsyscall_3)
}
-@@ -187,12 +253,19 @@ SECTIONS
+@@ -187,12 +244,19 @@ SECTIONS
#endif /* CONFIG_X86_64 */
/* Init code and data - will be freed after init */
@@ -14802,7 +15117,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
/*
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* output PHDR, so the next output section - .init.text - should
-@@ -201,12 +274,27 @@ SECTIONS
+@@ -201,12 +265,27 @@ SECTIONS
PERCPU_VADDR(0, :percpu)
#endif
@@ -14818,7 +15133,8 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
+ VMLINUX_SYMBOL(_einittext) = .;
+ . = ALIGN(PAGE_SIZE);
+ } :text.init
-+
+
+- INIT_DATA_SECTION(16)
+ /*
+ * .exit.text is discard at runtime, not link time, to deal with
+ * references from .altinstructions and .eh_frame
@@ -14828,14 +15144,13 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
+ . = ALIGN(16);
+ } :text.exit
+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
-
-- INIT_DATA_SECTION(16)
++
+ . = ALIGN(PAGE_SIZE);
+ INIT_DATA_SECTION(16) :init
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
-@@ -232,19 +320,11 @@ SECTIONS
+@@ -232,19 +311,11 @@ SECTIONS
*(.altinstr_replacement)
}
@@ -14856,7 +15171,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
PERCPU(PAGE_SIZE)
#endif
-@@ -267,12 +347,6 @@ SECTIONS
+@@ -267,12 +338,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
}
@@ -14869,7 +15184,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
/* BSS */
. = ALIGN(PAGE_SIZE);
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-@@ -288,6 +362,7 @@ SECTIONS
+@@ -288,6 +353,7 @@ SECTIONS
__brk_base = .;
. += 64 * 1024; /* 64k alignment slop space */
*(.brk_reservation) /* areas brk users have reserved */
@@ -14877,7 +15192,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8
__brk_limit = .;
}
-@@ -316,13 +391,12 @@ SECTIONS
+@@ -316,13 +382,12 @@ SECTIONS
* for the boot processor.
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
@@ -18096,7 +18411,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/highmem_32.c linux-2.6.32.21/arch/x86/mm/
}
diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm/hugetlbpage.c
--- linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400
@@ -267,13 +267,18 @@ static unsigned long hugetlb_get_unmappe
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
@@ -18120,7 +18435,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
}
full_search:
-@@ -281,13 +286,13 @@ full_search:
+@@ -281,26 +286,27 @@ full_search:
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
@@ -18137,18 +18452,38 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -310,9 +315,8 @@ static unsigned long hugetlb_get_unmappe
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = ALIGN(vma->vm_end, huge_page_size(h));
+ }
++
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+
+ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -309,10 +315,9 @@ static unsigned long hugetlb_get_unmappe
+ {
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev_vma;
+- struct vm_area_struct *vma, *prev_vma;
- unsigned long base = mm->mmap_base, addr = addr0;
++ struct vm_area_struct *vma;
+ unsigned long base = mm->mmap_base, addr;
unsigned long largest_hole = mm->cached_hole_size;
- int first_time = 1;
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
-@@ -322,7 +326,7 @@ static unsigned long hugetlb_get_unmappe
+@@ -322,7 +327,7 @@ static unsigned long hugetlb_get_unmappe
largest_hole = 0;
mm->free_area_cache = base;
}
@@ -18157,7 +18492,51 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
/* make sure it can fit in the remaining address space */
if (mm->free_area_cache < len)
goto fail;
-@@ -364,22 +368,26 @@ try_again:
+@@ -330,33 +335,27 @@ try_again:
+ /* either no address requested or cant fit in requested address hole */
+ addr = (mm->free_area_cache - len) & huge_page_mask(h);
+ do {
++ vma = find_vma(mm, addr);
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+- */
+- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+- return addr;
+-
+- /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+- if (addr + len <= vma->vm_start &&
+- (!prev_vma || (addr >= prev_vma->vm_end))) {
++ if (check_heap_stack_gap(vma, addr, len)) {
+ /* remember the address as a hint for next time */
+- mm->cached_hole_size = largest_hole;
+- return (mm->free_area_cache = addr);
+- } else {
+- /* pull free_area_cache down to the first hole */
+- if (mm->free_area_cache == vma->vm_end) {
+- mm->free_area_cache = vma->vm_start;
+- mm->cached_hole_size = largest_hole;
+- }
++ mm->cached_hole_size = largest_hole;
++ return (mm->free_area_cache = addr);
++ }
++ /* pull free_area_cache down to the first hole */
++ if (mm->free_area_cache == vma->vm_end) {
++ mm->free_area_cache = vma->vm_start;
++ mm->cached_hole_size = largest_hole;
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+- largest_hole = vma->vm_start - addr;
++ largest_hole = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+ addr = (vma->vm_start - len) & huge_page_mask(h);
+@@ -364,22 +363,26 @@ try_again:
fail:
/*
@@ -18195,7 +18574,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
mm->cached_hole_size = ~0UL;
addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
len, pgoff, flags);
-@@ -387,6 +395,7 @@ fail:
+@@ -387,6 +390,7 @@ fail:
/*
* Restore the topdown base:
*/
@@ -18203,7 +18582,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
mm->free_area_cache = base;
mm->cached_hole_size = ~0UL;
-@@ -400,10 +409,17 @@ hugetlb_get_unmapped_area(struct file *f
+@@ -400,10 +404,17 @@ hugetlb_get_unmapped_area(struct file *f
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -18222,15 +18601,16 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm
return -ENOMEM;
if (flags & MAP_FIXED) {
-@@ -415,7 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
+@@ -415,8 +426,7 @@ hugetlb_get_unmapped_area(struct file *f
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
-+ if (pax_task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+- (!vma || addr + len <= vma->vm_start))
++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
return addr;
}
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
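The rewritten top-down hugetlb allocator above also drops find_vma_prev() in favor of plain find_vma(). That is safe because of find_vma()'s contract, which returns the first VMA whose vm_end lies above the probe address; a sketch of the reasoning at the call site (assumed helper as before):

/*
 * find_vma(mm, addr) returns the first VMA with vm_end > addr, so:
 *  - if addr lies inside that VMA, addr + len <= vm_start is false and
 *    check_heap_stack_gap() fails anyway;
 *  - if addr lies in a hole, every lower VMA ends at or below addr,
 *    which is exactly what the removed prev_vma test used to prove.
 */
vma = find_vma(mm, addr);
if (check_heap_stack_gap(vma, addr, len)) {
	mm->cached_hole_size = largest_hole;
	return (mm->free_area_cache = addr);	/* hint for next time */
}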
diff -urNp linux-2.6.32.21/arch/x86/mm/init_32.c linux-2.6.32.21/arch/x86/mm/init_32.c
--- linux-2.6.32.21/arch/x86/mm/init_32.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/arch/x86/mm/init_32.c 2010-09-04 15:54:51.000000000 -0400
@@ -18602,7 +18982,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/init_64.c linux-2.6.32.21/arch/x86/mm/ini
return "[vsyscall]";
diff -urNp linux-2.6.32.21/arch/x86/mm/init.c linux-2.6.32.21/arch/x86/mm/init.c
--- linux-2.6.32.21/arch/x86/mm/init.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/mm/init.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/mm/init.c 2010-09-16 22:50:17.000000000 -0400
@@ -69,11 +69,7 @@ static void __init find_early_table_spac
* cause a hotspot and fill up ZONE_DMA. The page tables
* need roughly 0.5KB per GB.
@@ -18616,6 +18996,15 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/init.c linux-2.6.32.21/arch/x86/mm/init.c
e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
tables, PAGE_SIZE);
if (e820_table_start == -1UL)
+@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
+ #endif
+
+ set_nx();
+- if (nx_enabled)
++ if (nx_enabled && cpu_has_nx)
+ printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+
+ /* Enable PSE if available */
@@ -331,7 +327,13 @@ unsigned long __init_refok init_memory_m
*/
int devmem_is_allowed(unsigned long pagenr)
@@ -20213,7 +20602,7 @@ diff -urNp linux-2.6.32.21/arch/x86/vdso/vma.c linux-2.6.32.21/arch/x86/vdso/vma
-__setup("vdso=", vdso_setup);
diff -urNp linux-2.6.32.21/arch/x86/xen/enlighten.c linux-2.6.32.21/arch/x86/xen/enlighten.c
--- linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-09-04 15:54:51.000000000 -0400
++++ linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-09-17 17:30:16.000000000 -0400
@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
struct shared_info xen_dummy_shared_info;
@@ -20241,10 +20630,10 @@ diff -urNp linux-2.6.32.21/arch/x86/xen/enlighten.c linux-2.6.32.21/arch/x86/xen
- check_efer();
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
-+ (cpuid_edx(0x80000001) & (1 << (X86_FEATURE_NX & 31)))) {
++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
+ unsigned l, h;
+
-+#if defined(CONFIG_X86_32)
++#ifdef CONFIG_X86_PAE
+ nx_enabled = 1;
+#endif
+ __supported_pte_mask |= _PAGE_NX;
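A small correctness detail in the Xen hunk above: the NX probe now shifts 1U rather than 1. For X86_FEATURE_NX the shift count is 20 (X86_FEATURE_NX & 31), which is harmless either way, but shifting a signed int into the sign bit is undefined behavior in C, presumably why the constant was made unsigned. Illustration (hypothetical values):

unsigned int ok  = 1U << 31;	/* well-defined: 0x80000000 */
int          bad = 1  << 31;	/* undefined: shifts into the sign bit */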
@@ -31772,19 +32161,6 @@ diff -urNp linux-2.6.32.21/fs/ext4/balloc.c linux-2.6.32.21/fs/ext4/balloc.c
if (free_blocks >= (nblocks + dirty_blocks))
return 1;
}
-diff -urNp linux-2.6.32.21/fs/ext4/ioctl.c linux-2.6.32.21/fs/ext4/ioctl.c
---- linux-2.6.32.21/fs/ext4/ioctl.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/fs/ext4/ioctl.c 2010-09-04 15:54:52.000000000 -0400
-@@ -230,6 +230,9 @@ setversion_out:
- struct file *donor_filp;
- int err;
-
-+ /* temporary workaround for bugs in here */
-+ return -EOPNOTSUPP;
-+
- if (!(filp->f_mode & FMODE_READ) ||
- !(filp->f_mode & FMODE_WRITE))
- return -EBADF;
diff -urNp linux-2.6.32.21/fs/ext4/namei.c linux-2.6.32.21/fs/ext4/namei.c
--- linux-2.6.32.21/fs/ext4/namei.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/fs/ext4/namei.c 2010-09-04 15:54:52.000000000 -0400
@@ -34418,7 +34794,7 @@ diff -urNp linux-2.6.32.21/fs/proc/root.c linux-2.6.32.21/fs/proc/root.c
diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
--- linux-2.6.32.21/fs/proc/task_mmu.c 2010-08-29 21:08:16.000000000 -0400
-+++ linux-2.6.32.21/fs/proc/task_mmu.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/fs/proc/task_mmu.c 2010-09-17 18:40:06.000000000 -0400
@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
@@ -34462,15 +34838,30 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
-@@ -223,13 +240,22 @@ static void show_map_vma(struct seq_file
- start += PAGE_SIZE;
+@@ -206,7 +223,6 @@ static void show_map_vma(struct seq_file
+ int flags = vma->vm_flags;
+ unsigned long ino = 0;
+ unsigned long long pgoff = 0;
+- unsigned long start;
+ dev_t dev = 0;
+ int len;
+@@ -217,19 +233,23 @@ static void show_map_vma(struct seq_file
+ pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
+ }
+
+- /* We don't show the stack guard page in /proc/maps */
+- start = vma->vm_start;
+- if (vma->vm_flags & VM_GROWSDOWN)
+- start += PAGE_SIZE;
+-
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
+- start,
+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ PAX_RAND_FLAGS(mm) ? 0UL : start,
++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
+#else
- start,
++ vma->vm_start,
vma->vm_end,
+#endif
flags & VM_READ ? 'r' : '-',
@@ -34485,7 +34876,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
MAJOR(dev), MINOR(dev), ino, &len);
/*
-@@ -238,16 +264,16 @@ static void show_map_vma(struct seq_file
+@@ -238,16 +258,16 @@ static void show_map_vma(struct seq_file
*/
if (file) {
pad_len_spaces(m, len);
@@ -34507,7 +34898,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
name = "[stack]";
}
} else {
-@@ -390,9 +416,16 @@ static int show_smap(struct seq_file *m,
+@@ -390,9 +410,16 @@ static int show_smap(struct seq_file *m,
};
memset(&mss, 0, sizeof mss);
@@ -34527,7 +34918,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c
show_map_vma(m, vma);
-@@ -408,7 +441,11 @@ static int show_smap(struct seq_file *m,
+@@ -408,7 +435,11 @@ static int show_smap(struct seq_file *m,
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n",
@@ -41692,8 +42083,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_fork.c linux-2.6.32.21/grsecurity/gr
+}
diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/grsec_init.c
--- linux-2.6.32.21/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/grsec_init.c 2010-09-04 15:54:52.000000000 -0400
-@@ -0,0 +1,258 @@
++++ linux-2.6.32.21/grsecurity/grsec_init.c 2010-09-17 19:24:55.000000000 -0400
+@@ -0,0 +1,266 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
@@ -41742,6 +42133,7 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/gr
+#endif
+int grsec_lastack_retries;
+int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
+int grsec_enable_socket_all;
+int grsec_socket_all_gid;
+int grsec_enable_socket_client;
@@ -41832,6 +42224,13 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/gr
+#endif
+#endif
+
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ /* for backward compatibility, tpe_invert always defaults to on if
++ enabled in the kernel
++ */
++ grsec_enable_tpe_invert = 1;
++#endif
++
+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
+#ifndef CONFIG_GRKERNSEC_SYSCTL
+ grsec_lock = 1;
@@ -42828,8 +43227,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_sock.c linux-2.6.32.21/grsecurity/gr
+}
diff -urNp linux-2.6.32.21/grsecurity/grsec_sysctl.c linux-2.6.32.21/grsecurity/grsec_sysctl.c
--- linux-2.6.32.21/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/grsec_sysctl.c 2010-09-04 15:54:52.000000000 -0400
-@@ -0,0 +1,459 @@
++++ linux-2.6.32.21/grsecurity/grsec_sysctl.c 2010-09-17 19:22:27.000000000 -0400
+@@ -0,0 +1,469 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
@@ -43103,6 +43502,16 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_sysctl.c linux-2.6.32.21/grsecurity/
+ .proc_handler = &proc_dointvec,
+ },
+#endif
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++ {
++ .ctl_name = CTL_UNNUMBERED,
++ .procname = "tpe_invert",
++ .data = &grsec_enable_tpe_invert,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
+ {
+ .ctl_name = CTL_UNNUMBERED,
@@ -43328,8 +43737,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_time.c linux-2.6.32.21/grsecurity/gr
+}
diff -urNp linux-2.6.32.21/grsecurity/grsec_tpe.c linux-2.6.32.21/grsecurity/grsec_tpe.c
--- linux-2.6.32.21/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/grsec_tpe.c 2010-09-04 15:54:52.000000000 -0400
-@@ -0,0 +1,38 @@
++++ linux-2.6.32.21/grsecurity/grsec_tpe.c 2010-09-17 19:28:20.000000000 -0400
+@@ -0,0 +1,39 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/file.h>
@@ -43347,7 +43756,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_tpe.c linux-2.6.32.21/grsecurity/grs
+
+ if (cred->uid && ((grsec_enable_tpe &&
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
-+ !in_group_p(grsec_tpe_gid)
++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
+#else
+ in_group_p(grsec_tpe_gid)
+#endif
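With the new tpe_invert toggle, the gate above reduces to XOR-like logic: a non-root user is TPE-restricted exactly when group membership disagrees with the invert flag. Stand-alone restatement of just that predicate (hypothetical wrapper; assumes grsec_enable_tpe is set and the caller is non-root):

/*
 * tpe_invert  in_group_p(tpe_gid)  restricted?
 *     0               0               no
 *     0               1               yes  (GID marks untrusted users)
 *     1               0               yes  (GID marks trusted users)
 *     1               1               no
 */
static int tpe_restricts(int tpe_invert, int in_tpe_group)
{
	return (tpe_invert && !in_tpe_group) ||
	       (!tpe_invert && in_tpe_group);
}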
@@ -43435,8 +43845,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsum.c linux-2.6.32.21/grsecurity/grsum.c
+}
diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
--- linux-2.6.32.21/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/grsecurity/Kconfig 2010-09-14 21:34:38.000000000 -0400
-@@ -0,0 +1,987 @@
++++ linux-2.6.32.21/grsecurity/Kconfig 2010-09-17 19:36:28.000000000 -0400
+@@ -0,0 +1,986 @@
+#
+# grecurity configuration
+#
@@ -43588,7 +43998,7 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
+ select PAX_PT_PAX_FLAGS
+ select PAX_HAVE_ACL_FLAGS
+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
-+ select PAX_MEMORY_UDEREF if (X86_32 && !XEN)
++ select PAX_MEMORY_UDEREF if (X86 && !XEN)
+ select PAX_RANDKSTACK if (X86_TSC && !X86_64)
+ select PAX_SEGMEXEC if (X86_32)
+ select PAX_PAGEEXEC
@@ -44197,11 +44607,14 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
+ is enabled, a sysctl option with name "tpe" is created.
+
+config GRKERNSEC_TPE_ALL
-+ bool "Partially restrict non-root users"
++ bool "Partially restrict all non-root users"
+ depends on GRKERNSEC_TPE
+ help
-+ If you say Y here, All non-root users other than the ones in the
-+ group specified in the main TPE option will only be allowed to
++ If you say Y here, all non-root users will be covered under
++ a weaker TPE restriction. This is separate from, and in addition to,
++ the main TPE options that you have selected elsewhere. Thus, if a
++ "trusted" GID is chosen, this restriction applies to even that GID.
++ Under this restriction, all non-root users will only be allowed to
+ execute files in directories they own that are not group or
+ world-writable, or in directories owned by root and writable only by
+ root. If the sysctl option is enabled, a sysctl option with name
@@ -44214,31 +44627,27 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig
+ If you say Y here, the group you specify in the TPE configuration will
+ decide what group TPE restrictions will be *disabled* for. This
+ option is useful if you want TPE restrictions to be applied to most
-+ users on the system.
++ users on the system. If the sysctl option is enabled, a sysctl option
++ with name "tpe_invert" is created. Unlike other sysctl options, this
++ entry will default to on for backward-compatibility.
+
+config GRKERNSEC_TPE_GID
+ int "GID for untrusted users"
+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
+ default 1005
+ help
-+ If you have selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *disabled* for.
-+ If you have not selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *enabled* for.
-+ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+ is created.
++ Setting this GID determines what group TPE restrictions will be
++ *enabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
+
+config GRKERNSEC_TPE_GID
+ int "GID for trusted users"
+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
+ default 1005
+ help
-+ If you have selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *disabled* for.
-+ If you have not selected the "Invert GID option" above, setting this
-+ GID determines what group TPE restrictions will be *enabled* for.
-+ If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+ is created.
++ Setting this GID determines what group TPE restrictions will be
++ *disabled* for. If the sysctl option is enabled, a sysctl option
++ with name "tpe_gid" is created.
+
+endmenu
+menu "Network Protections"
@@ -46216,7 +46625,7 @@ diff -urNp linux-2.6.32.21/include/linux/grdefs.h linux-2.6.32.21/include/linux/
+#endif
diff -urNp linux-2.6.32.21/include/linux/grinternal.h linux-2.6.32.21/include/linux/grinternal.h
--- linux-2.6.32.21/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
-+++ linux-2.6.32.21/include/linux/grinternal.h 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/include/linux/grinternal.h 2010-09-17 19:39:50.000000000 -0400
@@ -0,0 +1,211 @@
+#ifndef __GRINTERNAL_H
+#define __GRINTERNAL_H
@@ -46282,7 +46691,7 @@ diff -urNp linux-2.6.32.21/include/linux/grinternal.h linux-2.6.32.21/include/li
+extern int grsec_enable_tpe;
+extern int grsec_tpe_gid;
+extern int grsec_enable_tpe_all;
-+extern int grsec_enable_sidcaps;
++extern int grsec_enable_tpe_invert;
+extern int grsec_enable_socket_all;
+extern int grsec_socket_all_gid;
+extern int grsec_enable_socket_client;
@@ -47499,7 +47908,7 @@ diff -urNp linux-2.6.32.21/include/linux/reiserfs_fs_sb.h linux-2.6.32.21/includ
on-disk FS format */
diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/sched.h
--- linux-2.6.32.21/include/linux/sched.h 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/include/linux/sched.h 2010-09-14 18:41:02.000000000 -0400
++++ linux-2.6.32.21/include/linux/sched.h 2010-09-17 18:34:04.000000000 -0400
@@ -101,6 +101,7 @@ struct bio;
struct fs_struct;
struct bts_context;
@@ -47508,7 +47917,19 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
/*
* List of flags we want to share for kernel threads,
-@@ -667,6 +668,15 @@ struct signal_struct {
+@@ -372,9 +373,11 @@ struct user_namespace;
+ #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+ extern int sysctl_max_map_count;
++extern unsigned long sysctl_heap_stack_gap;
+
+ #include <linux/aio.h>
+
++extern bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+@@ -667,6 +670,15 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
@@ -47524,7 +47945,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
int oom_adj; /* OOM kill score adjustment (bit shift) */
};
-@@ -1220,7 +1230,7 @@ struct rcu_node;
+@@ -1220,7 +1232,7 @@ struct rcu_node;
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -47533,7 +47954,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
-@@ -1332,8 +1342,8 @@ struct task_struct {
+@@ -1332,8 +1344,8 @@ struct task_struct {
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
@@ -47544,7 +47965,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
-@@ -1347,16 +1357,6 @@ struct task_struct {
+@@ -1347,16 +1359,6 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -47561,7 +47982,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
-@@ -1440,6 +1440,15 @@ struct task_struct {
+@@ -1440,6 +1442,15 @@ struct task_struct {
int hardirq_context;
int softirq_context;
#endif
@@ -47577,7 +47998,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
-@@ -1460,6 +1469,9 @@ struct task_struct {
+@@ -1460,6 +1471,9 @@ struct task_struct {
struct backing_dev_info *backing_dev_info;
@@ -47587,7 +48008,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
struct io_context *io_context;
unsigned long ptrace_message;
-@@ -1523,6 +1535,20 @@ struct task_struct {
+@@ -1523,6 +1537,20 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
@@ -47608,7 +48029,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored adress in ret_stack */
int curr_ret_stack;
-@@ -1546,6 +1572,52 @@ struct task_struct {
+@@ -1546,6 +1574,52 @@ struct task_struct {
#endif /* CONFIG_TRACING */
};
@@ -47661,7 +48082,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
-@@ -2146,7 +2218,7 @@ extern void __cleanup_sighand(struct sig
+@@ -2146,7 +2220,7 @@ extern void __cleanup_sighand(struct sig
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
@@ -47670,7 +48091,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
extern void daemonize(const char *, ...);
extern int allow_signal(int);
-@@ -2259,8 +2331,8 @@ static inline void unlock_task_sighand(s
+@@ -2259,8 +2333,8 @@ static inline void unlock_task_sighand(s
#ifndef __HAVE_THREAD_FUNCTIONS
@@ -47681,7 +48102,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
-@@ -2275,13 +2347,17 @@ static inline unsigned long *end_of_stac
+@@ -2275,13 +2349,17 @@ static inline unsigned long *end_of_stac
#endif
@@ -49315,7 +49736,7 @@ diff -urNp linux-2.6.32.21/kernel/fork.c linux-2.6.32.21/kernel/fork.c
new_fs = fs;
diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c
--- linux-2.6.32.21/kernel/futex.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/kernel/futex.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/kernel/futex.c 2010-09-17 17:43:01.000000000 -0400
@@ -54,6 +54,7 @@
#include <linux/mount.h>
#include <linux/pagemap.h>
@@ -49345,19 +49766,17 @@ diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
-@@ -2376,7 +2382,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2376,7 +2382,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
{
struct robust_list_head __user *head;
unsigned long ret;
-- const struct cred *cred = current_cred(), *pcred;
+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
-+ const struct cred *cred = current_cred();
-+ const struct cred *pcred;
+ const struct cred *cred = current_cred(), *pcred;
+#endif
if (!futex_cmpxchg_enabled)
return -ENOSYS;
-@@ -2392,11 +2401,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2392,11 +2400,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
if (!p)
goto err_unlock;
ret = -EPERM;
@@ -49374,7 +49793,7 @@ diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c
head = p->robust_list;
rcu_read_unlock();
}
-@@ -2458,7 +2472,7 @@ retry:
+@@ -2458,7 +2471,7 @@ retry:
*/
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
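
A side note on the get_robust_list() hunks above: with CONFIG_GRKERNSEC_PROC_MEMMAP set,
the cred-based uid/euid comparison is compiled out in favour of the grsec check (the
replacement test itself sits outside the lines shown here), so the cred pointers are
fenced off to avoid set-but-unused variables. The shape of the pattern, as a trivial
standalone model:

    #include <stdio.h>

    /* Define to emulate CONFIG_GRKERNSEC_PROC_MEMMAP=y */
    /* #define CONFIG_GRKERNSEC_PROC_MEMMAP */

    int main(void)
    {
    #ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
        const char *gate = "uid/euid comparison";   /* cred, pcred in the patch */
    #else
        const char *gate = "grsec check (not shown in this hunk)";
    #endif
        printf("gate: %s\n", gate);
        return 0;
    }
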
@@ -50948,7 +51367,7 @@ diff -urNp linux-2.6.32.21/kernel/sys.c linux-2.6.32.21/kernel/sys.c
}
diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
--- linux-2.6.32.21/kernel/sysctl.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/kernel/sysctl.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/kernel/sysctl.c 2010-09-17 18:34:04.000000000 -0400
@@ -63,6 +63,13 @@
static int deprecated_sysctl_warning(struct __sysctl_args *args);
@@ -51018,7 +51437,21 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
{
.ctl_name = CTL_UNNUMBERED,
.procname = "sched_child_runs_first",
-@@ -1803,6 +1844,8 @@ static int do_sysctl_strategy(struct ctl
+@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
++ {
++ .procname = "heap_stack_gap",
++ .data = &sysctl_heap_stack_gap,
++ .maxlen = sizeof(sysctl_heap_stack_gap),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax,
++ },
+ #else
+ {
+ .ctl_name = CTL_UNNUMBERED,
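
The vm_table entry above turns the gap into a runtime knob handled by
proc_doulongvec_minmax. A userspace sketch for inspecting it (the path assumes the
entry surfaces under /proc/sys/vm, as vm_table entries do):

    #include <stdio.h>

    int main(void)
    {
        unsigned long gap = 0;
        FILE *f = fopen("/proc/sys/vm/heap_stack_gap", "r");

        if (!f || fscanf(f, "%lu", &gap) != 1) {
            perror("heap_stack_gap");
            if (f)
                fclose(f);
            return 1;
        }
        fclose(f);
        printf("heap_stack_gap = %lu bytes\n", gap);
        return 0;
    }

Writing a new value through the same file should take effect for subsequent mapping
decisions, since check_heap_stack_gap() reads sysctl_heap_stack_gap on every call.
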
+@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
return 0;
}
@@ -51027,7 +51460,7 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
static int parse_table(int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen,
-@@ -1821,7 +1864,7 @@ repeat:
+@@ -1821,7 +1871,7 @@ repeat:
if (n == table->ctl_name) {
int error;
if (table->child) {
@@ -51036,7 +51469,7 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c
return -EPERM;
name++;
nlen--;
-@@ -1906,6 +1949,33 @@ int sysctl_perm(struct ctl_table_root *r
+@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
int error;
int mode;
@@ -51260,25 +51693,6 @@ diff -urNp linux-2.6.32.21/kernel/trace/ftrace.c linux-2.6.32.21/kernel/trace/ft
}
/*
-diff -urNp linux-2.6.32.21/kernel/trace/Kconfig linux-2.6.32.21/kernel/trace/Kconfig
---- linux-2.6.32.21/kernel/trace/Kconfig 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/kernel/trace/Kconfig 2010-09-04 15:54:52.000000000 -0400
-@@ -126,6 +126,7 @@ if FTRACE
- config FUNCTION_TRACER
- bool "Kernel Function Tracer"
- depends on HAVE_FUNCTION_TRACER
-+ depends on !PAX_KERNEXEC
- select FRAME_POINTER
- select KALLSYMS
- select GENERIC_TRACER
-@@ -343,6 +344,7 @@ config POWER_TRACER
- config STACK_TRACER
- bool "Trace max stack"
- depends on HAVE_FUNCTION_TRACER
-+ depends on !PAX_KERNEXEC
- select FUNCTION_TRACER
- select STACKTRACE
- select KALLSYMS
diff -urNp linux-2.6.32.21/kernel/trace/ring_buffer.c linux-2.6.32.21/kernel/trace/ring_buffer.c
--- linux-2.6.32.21/kernel/trace/ring_buffer.c 2010-08-13 16:24:37.000000000 -0400
+++ linux-2.6.32.21/kernel/trace/ring_buffer.c 2010-09-04 15:54:52.000000000 -0400
@@ -51821,16 +52235,8 @@ diff -urNp linux-2.6.32.21/mm/madvise.c linux-2.6.32.21/mm/madvise.c
goto out;
diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
--- linux-2.6.32.21/mm/memory.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/memory.c 2010-09-04 15:54:52.000000000 -0400
-@@ -48,6 +48,7 @@
- #include <linux/ksm.h>
- #include <linux/rmap.h>
- #include <linux/module.h>
-+#include <linux/security.h>
- #include <linux/delayacct.h>
- #include <linux/init.h>
- #include <linux/writeback.h>
-@@ -187,8 +188,12 @@ static inline void free_pmd_range(struct
++++ linux-2.6.32.21/mm/memory.c 2010-09-17 18:20:06.000000000 -0400
+@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
return;
pmd = pmd_offset(pud, start);
@@ -51843,7 +52249,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
}
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -220,8 +225,12 @@ static inline void free_pud_range(struct
+@@ -220,8 +224,12 @@ static inline void free_pud_range(struct
return;
pud = pud_offset(pgd, start);
@@ -51856,7 +52262,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
}
/*
-@@ -1251,10 +1260,10 @@ int __get_user_pages(struct task_struct
+@@ -1251,10 +1259,10 @@ int __get_user_pages(struct task_struct
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
@@ -51869,7 +52275,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
if (!vma && in_gate_area(tsk, start)) {
unsigned long pg = start & PAGE_MASK;
struct vm_area_struct *gate_vma = get_gate_vma(tsk);
-@@ -1306,7 +1315,7 @@ int __get_user_pages(struct task_struct
+@@ -1306,7 +1314,7 @@ int __get_user_pages(struct task_struct
continue;
}
@@ -51878,7 +52284,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-@@ -1381,7 +1390,7 @@ int __get_user_pages(struct task_struct
+@@ -1381,7 +1389,7 @@ int __get_user_pages(struct task_struct
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
@@ -51887,7 +52293,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
return i;
}
-@@ -1977,6 +1986,186 @@ static inline void cow_user_page(struct
+@@ -1977,6 +1985,186 @@ static inline void cow_user_page(struct
copy_user_highpage(dst, src, va, vma);
}
@@ -52074,7 +52480,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2156,6 +2345,12 @@ gotten:
+@@ -2156,6 +2344,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -52087,7 +52493,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter(mm, file_rss);
-@@ -2207,6 +2402,10 @@ gotten:
+@@ -2207,6 +2401,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -52098,7 +52504,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2604,6 +2803,11 @@ static int do_swap_page(struct mm_struct
+@@ -2604,6 +2802,11 @@ static int do_swap_page(struct mm_struct
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -52110,7 +52516,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock_page(page);
if (flags & FAULT_FLAG_WRITE) {
-@@ -2615,6 +2819,11 @@ static int do_swap_page(struct mm_struct
+@@ -2615,6 +2818,11 @@ static int do_swap_page(struct mm_struct
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, pte);
@@ -52122,7 +52528,41 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2665,7 +2874,7 @@ static int do_anonymous_page(struct mm_s
+@@ -2630,33 +2838,6 @@ out_release:
+ }
+
+ /*
+- * This is like a special single-page "expand_downwards()",
+- * except we must first make sure that 'address-PAGE_SIZE'
+- * doesn't hit another vma.
+- *
+- * The "find_vma()" will do the right thing even if we wrap
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+- address &= PAGE_MASK;
+- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+- struct vm_area_struct *prev = vma->vm_prev;
+-
+- /*
+- * Is there a mapping abutting this one below?
+- *
+- * That's only ok if it's the same stack mapping
+- * that has gotten split..
+- */
+- if (prev && prev->vm_end == address)
+- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+- expand_stack(vma, address - PAGE_SIZE);
+- }
+- return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
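
The hunk above removes the single-page stack guard that 2.6.32.21 picked up for
CVE-2010-2240; in this patch its role is taken over by the configurable gap enforced
at mapping and fault time (sysctl_heap_stack_gap, default 64*1024 per the mm/mmap.c
hunk below). The rationale for a sized gap over one guard page, as a toy comparison
with assumed sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned long frame = 16 * 1024;   /* one large, attacker-chosen stack frame */
        unsigned long guard = 4096;        /* single guard page */
        unsigned long gap   = 64 * 1024;   /* default sysctl_heap_stack_gap */

        printf("frame clears the guard page: %s\n", frame > guard ? "yes" : "no");
        printf("frame clears the 64k gap:    %s\n", frame > gap ? "yes" : "no");
        return 0;
    }
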
+@@ -2665,27 +2846,23 @@ static int do_anonymous_page(struct mm_s
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -52131,7 +52571,31 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
spinlock_t *ptl;
pte_t entry;
-@@ -2704,6 +2913,11 @@ static int do_anonymous_page(struct mm_s
+- pte_unmap(page_table);
+-
+- /* Check if we need to add a guard page to the stack */
+- if (check_stack_guard_page(vma, address) < 0)
+- return VM_FAULT_SIGBUS;
+-
+- /* Use the zero-page for reads */
+ if (!(flags & FAULT_FLAG_WRITE)) {
+ entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ vma->vm_page_prot));
+- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++ ptl = pte_lockptr(mm, pmd);
++ spin_lock(ptl);
+ if (!pte_none(*page_table))
+ goto unlock;
+ goto setpte;
+ }
+
+ /* Allocate our own private page. */
++ pte_unmap(page_table);
++
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_zeroed_user_highpage_movable(vma, address);
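
Pieced together from the +/- lines above, the patched read-fault path now takes the
pte lock in place instead of unmapping and re-mapping the pte, and defers pte_unmap()
to the private-allocation path (a reconstruction, trimmed to the relevant lines):

    if (!(flags & FAULT_FLAG_WRITE)) {
        /* read fault: map the shared zero page */
        entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                      vma->vm_page_prot));
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);                 /* page_table stays mapped */
        if (!pte_none(*page_table))
            goto unlock;
        goto setpte;
    }

    /* Allocate our own private page. */
    pte_unmap(page_table);
    if (unlikely(anon_vma_prepare(vma)))
        goto oom;
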
+@@ -2704,6 +2881,11 @@ static int do_anonymous_page(struct mm_s
if (!pte_none(*page_table))
goto release;
@@ -52143,7 +52607,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
inc_mm_counter(mm, anon_rss);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -2711,6 +2925,12 @@ setpte:
+@@ -2711,6 +2893,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, entry);
@@ -52156,7 +52620,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2853,6 +3073,12 @@ static int __do_fault(struct mm_struct *
+@@ -2853,6 +3041,12 @@ static int __do_fault(struct mm_struct *
*/
/* Only go through if we didn't race with anybody else... */
if (likely(pte_same(*page_table, orig_pte))) {
@@ -52169,7 +52633,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
-@@ -2872,6 +3098,14 @@ static int __do_fault(struct mm_struct *
+@@ -2872,6 +3066,14 @@ static int __do_fault(struct mm_struct *
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, entry);
@@ -52184,7 +52648,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
} else {
if (charged)
mem_cgroup_uncharge_page(page);
-@@ -3019,6 +3253,12 @@ static inline int handle_pte_fault(struc
+@@ -3019,6 +3221,12 @@ static inline int handle_pte_fault(struc
if (flags & FAULT_FLAG_WRITE)
flush_tlb_page(vma, address);
}
@@ -52197,7 +52661,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3035,6 +3275,10 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3035,6 +3243,10 @@ int handle_mm_fault(struct mm_struct *mm
pmd_t *pmd;
pte_t *pte;
@@ -52208,7 +52672,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
-@@ -3042,6 +3286,34 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3042,6 +3254,34 @@ int handle_mm_fault(struct mm_struct *mm
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
@@ -52243,7 +52707,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3139,7 +3411,7 @@ static int __init gate_vma_init(void)
+@@ -3139,7 +3379,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -52386,7 +52850,7 @@ diff -urNp linux-2.6.32.21/mm/migrate.c linux-2.6.32.21/mm/migrate.c
goto out;
diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
--- linux-2.6.32.21/mm/mlock.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/mlock.c 2010-09-04 15:54:56.000000000 -0400
++++ linux-2.6.32.21/mm/mlock.c 2010-09-17 18:47:09.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
@@ -52395,7 +52859,40 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
-@@ -454,6 +455,9 @@ static int do_mlock(unsigned long start,
+@@ -138,19 +139,6 @@ void munlock_vma_page(struct page *page)
+ }
+ }
+
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+- return (vma->vm_flags & VM_GROWSDOWN) &&
+- (vma->vm_start == addr) &&
+- !vma_stack_continue(vma->vm_prev, addr);
+-}
+-
+ /**
+ * __mlock_vma_pages_range() - mlock a range of pages in the vma.
+ * @vma: target vma
+@@ -183,12 +171,6 @@ static long __mlock_vma_pages_range(stru
+ if (vma->vm_flags & VM_WRITE)
+ gup_flags |= FOLL_WRITE;
+
+- /* We don't try to access the guard page of a stack vma */
+- if (stack_guard_page(vma, start)) {
+- addr += PAGE_SIZE;
+- nr_pages--;
+- }
+-
+ while (nr_pages > 0) {
+ int i;
+
+@@ -454,6 +436,9 @@ static int do_mlock(unsigned long start,
return -EINVAL;
if (end == start)
return 0;
@@ -52405,7 +52902,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
vma = find_vma_prev(current->mm, start, &prev);
if (!vma || vma->vm_start > start)
return -ENOMEM;
-@@ -464,6 +468,11 @@ static int do_mlock(unsigned long start,
+@@ -464,6 +449,11 @@ static int do_mlock(unsigned long start,
for (nstart = start ; ; ) {
unsigned int newflags;
@@ -52417,7 +52914,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
newflags = vma->vm_flags | VM_LOCKED;
-@@ -513,6 +522,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+@@ -513,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
lock_limit >>= PAGE_SHIFT;
/* check against resource limits */
@@ -52425,7 +52922,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = do_mlock(start, len, 1);
up_write(&current->mm->mmap_sem);
-@@ -534,17 +544,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
+@@ -534,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
static int do_mlockall(int flags)
{
struct vm_area_struct * vma, * prev = NULL;
@@ -52453,17 +52950,17 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c
newflags = vma->vm_flags | VM_LOCKED;
if (!(flags & MCL_CURRENT))
newflags &= ~VM_LOCKED;
-@@ -576,6 +592,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+@@ -576,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
lock_limit >>= PAGE_SHIFT;
ret = -ENOMEM;
-+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
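
The one-line change above is a units fix for the learn-mode hint: RLIMIT_MEMLOCK is
expressed in bytes, while mm->total_vm counts pages, hence the shift by PAGE_SHIFT.
In miniature (4 KiB pages assumed):

    #include <stdio.h>

    int main(void)
    {
        unsigned long total_vm = 1024;   /* pages mapped by the task */
        unsigned int page_shift = 12;    /* 4 KiB pages */

        printf("old hint: %lu (pages, wrong unit)\n", total_vm);
        printf("new hint: %lu (bytes)\n", total_vm << page_shift);
        return 0;
    }
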
diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
--- linux-2.6.32.21/mm/mmap.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/mmap.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400
@@ -45,6 +45,16 @@
#define arch_rebalance_pgtables(addr, len) (addr)
#endif
@@ -52481,7 +52978,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
-@@ -70,16 +80,25 @@ static void unmap_region(struct mm_struc
+@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
*
*/
@@ -52509,7 +53006,14 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
EXPORT_SYMBOL(vm_get_page_prot);
-@@ -231,6 +250,7 @@ static struct vm_area_struct *remove_vma
+ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ struct percpu_counter vm_committed_as;
+
+ /*
+@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
struct vm_area_struct *next = vma->vm_next;
might_sleep();
@@ -52517,7 +53021,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file) {
-@@ -267,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
* not page aligned -Ram Gupta
*/
rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
@@ -52525,7 +53029,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
(mm->end_data - mm->start_data) > rlim)
goto out;
-@@ -704,6 +725,12 @@ static int
+@@ -704,6 +726,12 @@ static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -52538,7 +53042,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
if (vma->vm_pgoff == vm_pgoff)
-@@ -723,6 +750,12 @@ static int
+@@ -723,6 +751,12 @@ static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
@@ -52551,7 +53055,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (is_mergeable_vma(vma, file, vm_flags) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
pgoff_t vm_pglen;
-@@ -765,12 +798,19 @@ can_vma_merge_after(struct vm_area_struc
+@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct vm_area_struct *prev, unsigned long addr,
unsigned long end, unsigned long vm_flags,
@@ -52572,7 +53076,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* We later require that vma->vm_flags == vm_flags,
* so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -786,6 +826,15 @@ struct vm_area_struct *vma_merge(struct
+@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
if (next && next->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -52588,7 +53092,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Can it merge with the predecessor?
*/
-@@ -805,9 +854,24 @@ struct vm_area_struct *vma_merge(struct
+@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
/* cases 1, 6 */
vma_adjust(prev, prev->vm_start,
next->vm_end, prev->vm_pgoff, NULL);
@@ -52614,7 +53118,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return prev;
}
-@@ -818,12 +882,27 @@ struct vm_area_struct *vma_merge(struct
+@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen)) {
@@ -52644,7 +53148,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return area;
}
-@@ -898,14 +977,11 @@ none:
+@@ -898,14 +978,11 @@ none:
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
@@ -52660,7 +53164,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->stack_vm += pages;
if (flags & (VM_RESERVED|VM_IO))
mm->reserved_vm += pages;
-@@ -932,7 +1008,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
* (the exception is when the underlying filesystem is noexec
* mounted, in which case we dont add PROT_EXEC.)
*/
@@ -52669,7 +53173,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
prot |= PROT_EXEC;
-@@ -958,7 +1034,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -52678,7 +53182,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (addr & ~PAGE_MASK)
return addr;
-@@ -969,6 +1045,28 @@ unsigned long do_mmap_pgoff(struct file
+@@ -969,6 +1046,28 @@ unsigned long do_mmap_pgoff(struct file
vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -52707,7 +53211,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
-@@ -980,6 +1078,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -980,6 +1079,7 @@ unsigned long do_mmap_pgoff(struct file
locked += mm->locked_vm;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
@@ -52715,7 +53219,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
-@@ -1053,6 +1152,9 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1053,6 +1153,9 @@ unsigned long do_mmap_pgoff(struct file
if (error)
return error;
@@ -52725,7 +53229,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1065,10 +1167,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
+@@ -1065,10 +1168,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
*/
int vma_wants_writenotify(struct vm_area_struct *vma)
{
@@ -52738,7 +53242,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
/* The backer wishes to know when pages are first written to? */
-@@ -1117,14 +1219,24 @@ unsigned long mmap_region(struct file *f
+@@ -1117,14 +1220,24 @@ unsigned long mmap_region(struct file *f
unsigned long charged = 0;
struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
@@ -52765,7 +53269,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
/* Check against address space limit. */
-@@ -1173,6 +1285,16 @@ munmap_back:
+@@ -1173,6 +1286,16 @@ munmap_back:
goto unacct_error;
}
@@ -52782,7 +53286,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
-@@ -1195,6 +1317,19 @@ munmap_back:
+@@ -1195,6 +1318,19 @@ munmap_back:
error = file->f_op->mmap(file, vma);
if (error)
goto unmap_and_free_vma;
@@ -52802,7 +53306,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
-@@ -1218,6 +1353,11 @@ munmap_back:
+@@ -1218,6 +1354,11 @@ munmap_back:
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
@@ -52814,7 +53318,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* Once vma denies write, undo our temporary denial count */
if (correct_wcount)
atomic_inc(&inode->i_writecount);
-@@ -1226,6 +1366,7 @@ out:
+@@ -1226,6 +1367,7 @@ out:
mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -52822,7 +53326,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (vm_flags & VM_LOCKED) {
/*
* makes pages present; downgrades, drops, reacquires mmap_sem
-@@ -1248,6 +1389,12 @@ unmap_and_free_vma:
+@@ -1248,6 +1390,12 @@ unmap_and_free_vma:
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
charged = 0;
free_vma:
@@ -52835,7 +53339,41 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
-@@ -1281,6 +1428,10 @@ arch_get_unmapped_area(struct file *filp
+@@ -1255,6 +1403,33 @@ unacct_error:
+ return error;
+ }
+
++bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++{
++ if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++ if (addr > sysctl_heap_stack_gap)
++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++ else
++ vma = find_vma(current->mm, 0);
++ if (vma && (vma->vm_flags & VM_GROWSUP))
++ return false;
++#endif
++ return true;
++ }
++
++ if (addr + len > vma->vm_start)
++ return false;
++
++ if (vma->vm_flags & VM_GROWSDOWN)
++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
++#endif
++
++ return true;
++}
++
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
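
check_heap_stack_gap() above is the core of the feature: a candidate range is rejected
if it would end closer than sysctl_heap_stack_gap bytes to a VM_GROWSDOWN vma (with a
mirrored VM_GROWSUP case under CONFIG_STACK_GROWSUP). A standalone model of the
growsdown branch with worked addresses (all values hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define GAP (64UL * 1024)   /* default sysctl_heap_stack_gap */

    /* Growsdown branch of the kernel predicate, simplified. */
    static bool gap_ok(unsigned long stack_start, unsigned long addr,
                       unsigned long len)
    {
        if (addr + len > stack_start)
            return false;                     /* overlaps the stack vma */
        return GAP <= stack_start - addr - len;
    }

    int main(void)
    {
        /* stack vma beginning at 0x7fff0000 */
        printf("%d\n", gap_ok(0x7fff0000UL, 0x7ffe0000UL, 0x10000UL)); /* 0: gap would be 0 */
        printf("%d\n", gap_ok(0x7fff0000UL, 0x7ffd0000UL, 0x10000UL)); /* 1: gap is exactly 64k */
        return 0;
    }
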
+@@ -1281,18 +1456,23 @@ arch_get_unmapped_area(struct file *filp
if (flags & MAP_FIXED)
return addr;
@@ -52845,9 +53383,15 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
+
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
-@@ -1289,10 +1440,10 @@ arch_get_unmapped_area(struct file *filp
- return addr;
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
}
if (len > mm->cached_hole_size) {
- start_addr = addr = mm->free_area_cache;
@@ -52860,7 +53404,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
full_search:
-@@ -1303,9 +1454,8 @@ full_search:
+@@ -1303,34 +1483,40 @@ full_search:
* Start a new search - just in case we missed
* some holes.
*/
@@ -52872,7 +53416,29 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->cached_hole_size = 0;
goto full_search;
}
-@@ -1327,10 +1477,16 @@ full_search:
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
+- /*
+- * Remember the place where we stopped the search:
+- */
+- mm->free_area_cache = addr + len;
+- return addr;
+- }
++ if (check_heap_stack_gap(vma, addr, len))
++ break;
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
+ }
++
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
+ }
+ #endif
void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
@@ -52890,7 +53456,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->free_area_cache = addr;
mm->cached_hole_size = ~0UL;
}
-@@ -1348,7 +1504,7 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1348,7 +1534,7 @@ arch_get_unmapped_area_topdown(struct fi
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
@@ -52899,7 +53465,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* requested length too big for entire address space */
if (len > TASK_SIZE)
-@@ -1357,6 +1513,10 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1357,13 +1543,18 @@ arch_get_unmapped_area_topdown(struct fi
if (flags & MAP_FIXED)
return addr;
@@ -52910,7 +53476,37 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
-@@ -1414,13 +1574,21 @@ bottomup:
+- vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
+- return addr;
++ if (TASK_SIZE - len >= addr) {
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, addr, len))
++ return addr;
++ }
+ }
+
+ /* check if free_area_cache is useful for us */
+@@ -1378,7 +1569,7 @@ arch_get_unmapped_area_topdown(struct fi
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr - len, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+@@ -1395,7 +1586,7 @@ arch_get_unmapped_area_topdown(struct fi
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || addr+len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, addr, len))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+
+@@ -1414,13 +1605,21 @@ bottomup:
* can happen with large stack limits and large mmap()
* allocations.
*/
@@ -52934,7 +53530,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->cached_hole_size = ~0UL;
return addr;
-@@ -1429,6 +1597,12 @@ bottomup:
+@@ -1429,6 +1628,12 @@ bottomup:
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
@@ -52947,7 +53543,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Is this a new hole at the highest possible address?
*/
-@@ -1436,8 +1610,10 @@ void arch_unmap_area_topdown(struct mm_s
+@@ -1436,8 +1641,10 @@ void arch_unmap_area_topdown(struct mm_s
mm->free_area_cache = addr;
/* dont allow allocations above current base */
@@ -52959,7 +53555,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
}
unsigned long
-@@ -1545,6 +1721,27 @@ out:
+@@ -1545,6 +1752,27 @@ out:
return prev ? prev->vm_next : vma;
}
@@ -52987,7 +53583,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
-@@ -1561,6 +1758,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1561,6 +1789,7 @@ static int acct_stack_growth(struct vm_a
return -ENOMEM;
/* Stack limit test */
@@ -52995,7 +53591,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (size > rlim[RLIMIT_STACK].rlim_cur)
return -ENOMEM;
-@@ -1570,6 +1768,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1570,6 +1799,7 @@ static int acct_stack_growth(struct vm_a
unsigned long limit;
locked = mm->locked_vm + grow;
limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
@@ -53003,7 +53599,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (locked > limit && !capable(CAP_IPC_LOCK))
return -ENOMEM;
}
-@@ -1605,35 +1804,40 @@ static
+@@ -1605,35 +1835,42 @@ static
#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
@@ -53026,7 +53622,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
-+ if (locknext && unlikely(anon_vma_prepare(vma->vm_next)))
++ if (locknext && anon_vma_prepare(vma->vm_next))
+ return -ENOMEM;
anon_vma_lock(vma);
+ if (locknext)
@@ -53050,11 +53646,13 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* Somebody else might have raced and expanded it already */
- if (address > vma->vm_end) {
-+ if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
unsigned long size, grow;
size = address - vma->vm_start;
-@@ -1643,6 +1847,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1643,6 +1880,8 @@ int expand_upwards(struct vm_area_struct
if (!error)
vma->vm_end = address;
}
@@ -53063,25 +53661,25 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
anon_vma_unlock(vma);
return error;
}
-@@ -1654,7 +1860,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1654,7 +1893,8 @@ int expand_upwards(struct vm_area_struct
static int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
- int error;
+ int error, lockprev = 0;
-+ struct vm_area_struct *prev = NULL;
++ struct vm_area_struct *prev;
/*
* We must make sure the anon_vma is allocated
-@@ -1668,6 +1875,15 @@ static int expand_downwards(struct vm_ar
+@@ -1668,6 +1908,15 @@ static int expand_downwards(struct vm_ar
if (error)
return error;
++ prev = vma->vm_prev;
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-+ find_vma_prev(vma->vm_mm, address, &prev);
+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
+#endif
-+ if (lockprev && unlikely(anon_vma_prepare(prev)))
++ if (lockprev && anon_vma_prepare(prev))
+ return -ENOMEM;
+ if (lockprev)
+ anon_vma_lock(prev);
@@ -53089,12 +53687,14 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
anon_vma_lock(vma);
/*
-@@ -1677,9 +1893,15 @@ static int expand_downwards(struct vm_ar
+@@ -1677,9 +1926,17 @@ static int expand_downwards(struct vm_ar
*/
/* Somebody else might have raced and expanded it already */
- if (address < vma->vm_start) {
-+ if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++ error = -ENOMEM;
++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
unsigned long size, grow;
+#ifdef CONFIG_PAX_SEGMEXEC
@@ -53106,7 +53706,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
-@@ -1687,9 +1909,20 @@ static int expand_downwards(struct vm_ar
+@@ -1687,9 +1944,20 @@ static int expand_downwards(struct vm_ar
if (!error) {
vma->vm_start = address;
vma->vm_pgoff -= grow;
@@ -53127,7 +53727,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return error;
}
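
Symmetrically, stack growth itself is now capped: the test added to expand_downwards()
above refuses to let a growing stack come within sysctl_heap_stack_gap bytes of an
accessible mapping beneath it (expand_upwards() gets the matching check). With toy
numbers for a hypothetical layout:

    #include <stdio.h>

    int main(void)
    {
        unsigned long gap      = 64 * 1024;
        unsigned long prev_end = 0x60000000UL;   /* top of the mapping below the stack */
        unsigned long address  = 0x6000c000UL;   /* faulting address, 48 KiB above it */

        printf("%s\n", address - prev_end < gap ? "-ENOMEM" : "grow stack");
        return 0;
    }
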
-@@ -1765,6 +1998,13 @@ static void remove_vma_list(struct mm_st
+@@ -1765,6 +2033,13 @@ static void remove_vma_list(struct mm_st
do {
long nrpages = vma_pages(vma);
@@ -53141,7 +53741,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
mm->total_vm -= nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma);
-@@ -1810,6 +2050,16 @@ detach_vmas_to_be_unmapped(struct mm_str
+@@ -1810,6 +2085,16 @@ detach_vmas_to_be_unmapped(struct mm_str
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
vma->vm_prev = NULL;
do {
@@ -53158,7 +53758,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
rb_erase(&vma->vm_rb, &mm->mm_rb);
mm->map_count--;
tail_vma = vma;
-@@ -1837,10 +2087,25 @@ int split_vma(struct mm_struct * mm, str
+@@ -1837,10 +2122,25 @@ int split_vma(struct mm_struct * mm, str
struct mempolicy *pol;
struct vm_area_struct *new;
@@ -53184,7 +53784,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (mm->map_count >= sysctl_max_map_count)
return -ENOMEM;
-@@ -1848,6 +2113,16 @@ int split_vma(struct mm_struct * mm, str
+@@ -1848,6 +2148,16 @@ int split_vma(struct mm_struct * mm, str
if (!new)
return -ENOMEM;
@@ -53201,7 +53801,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/* most fields are the same, copy all, and then fixup */
*new = *vma;
-@@ -1858,8 +2133,29 @@ int split_vma(struct mm_struct * mm, str
+@@ -1858,8 +2168,29 @@ int split_vma(struct mm_struct * mm, str
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
}
@@ -53231,7 +53831,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
kmem_cache_free(vm_area_cachep, new);
return PTR_ERR(pol);
}
-@@ -1880,6 +2176,28 @@ int split_vma(struct mm_struct * mm, str
+@@ -1880,6 +2211,28 @@ int split_vma(struct mm_struct * mm, str
else
vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -53260,7 +53860,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
}
-@@ -1888,11 +2206,30 @@ int split_vma(struct mm_struct * mm, str
+@@ -1888,11 +2241,30 @@ int split_vma(struct mm_struct * mm, str
* work. This now handles partial unmappings.
* Jeremy Fitzhardinge <jeremy@goop.org>
*/
@@ -53291,7 +53891,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
return -EINVAL;
-@@ -1956,6 +2293,8 @@ int do_munmap(struct mm_struct *mm, unsi
+@@ -1956,6 +2328,8 @@ int do_munmap(struct mm_struct *mm, unsi
/* Fix up all other VM information */
remove_vma_list(mm, vma);
@@ -53300,7 +53900,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
}
-@@ -1968,22 +2307,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+@@ -1968,22 +2342,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
profile_munmap(addr);
@@ -53329,7 +53929,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* this is really a simplified "do_mmap". it only handles
* anonymous maps. eventually we may be able to do some
-@@ -1997,6 +2332,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -1997,6 +2367,7 @@ unsigned long do_brk(unsigned long addr,
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
@@ -53337,7 +53937,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
len = PAGE_ALIGN(len);
if (!len)
-@@ -2008,16 +2344,30 @@ unsigned long do_brk(unsigned long addr,
+@@ -2008,16 +2379,30 @@ unsigned long do_brk(unsigned long addr,
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -53369,7 +53969,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
locked += mm->locked_vm;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
-@@ -2034,22 +2384,22 @@ unsigned long do_brk(unsigned long addr,
+@@ -2034,22 +2419,22 @@ unsigned long do_brk(unsigned long addr,
/*
* Clear old maps. this also does some error checking for us
*/
@@ -53396,7 +53996,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
-@@ -2063,7 +2413,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -2063,7 +2448,7 @@ unsigned long do_brk(unsigned long addr,
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (!vma) {
@@ -53405,7 +54005,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return -ENOMEM;
}
-@@ -2075,11 +2425,12 @@ unsigned long do_brk(unsigned long addr,
+@@ -2075,11 +2460,12 @@ unsigned long do_brk(unsigned long addr,
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
@@ -53420,7 +54020,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return addr;
}
-@@ -2126,8 +2477,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2126,8 +2512,10 @@ void exit_mmap(struct mm_struct *mm)
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
*/
@@ -53432,7 +54032,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
-@@ -2141,6 +2494,10 @@ int insert_vm_struct(struct mm_struct *
+@@ -2141,6 +2529,10 @@ int insert_vm_struct(struct mm_struct *
struct vm_area_struct * __vma, * prev;
struct rb_node ** rb_link, * rb_parent;
@@ -53443,7 +54043,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* The vm_pgoff of a purely anonymous vma should be irrelevant
* until its first write fault, when page's anon_vma and index
-@@ -2163,7 +2520,22 @@ int insert_vm_struct(struct mm_struct *
+@@ -2163,7 +2555,22 @@ int insert_vm_struct(struct mm_struct *
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
@@ -53466,7 +54066,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
return 0;
}
-@@ -2181,6 +2553,8 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2181,6 +2588,8 @@ struct vm_area_struct *copy_vma(struct v
struct rb_node **rb_link, *rb_parent;
struct mempolicy *pol;
@@ -53475,7 +54075,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
-@@ -2224,6 +2598,35 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2224,6 +2633,35 @@ struct vm_area_struct *copy_vma(struct v
return new_vma;
}
@@ -53511,7 +54111,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
/*
* Return true if the calling process may expand its vm space by the passed
* number of pages
-@@ -2234,7 +2637,7 @@ int may_expand_vm(struct mm_struct *mm,
+@@ -2234,7 +2672,7 @@ int may_expand_vm(struct mm_struct *mm,
unsigned long lim;
lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
@@ -53520,7 +54120,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
if (cur + npages > lim)
return 0;
return 1;
-@@ -2303,6 +2706,17 @@ int install_special_mapping(struct mm_st
+@@ -2303,6 +2741,17 @@ int install_special_mapping(struct mm_st
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -53540,7 +54140,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c
diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
--- linux-2.6.32.21/mm/mprotect.c 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/mm/mprotect.c 2010-09-04 15:54:52.000000000 -0400
++++ linux-2.6.32.21/mm/mprotect.c 2010-09-17 18:34:04.000000000 -0400
@@ -24,10 +24,16 @@
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
@@ -53607,7 +54207,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned long newflags)
-@@ -144,6 +192,14 @@ mprotect_fixup(struct vm_area_struct *vm
+@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
int error;
int dirty_accountable = 0;
@@ -53622,7 +54222,22 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
if (newflags == oldflags) {
*pprev = vma;
return 0;
-@@ -165,6 +221,38 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+
++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++ return -ENOMEM;
++
++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++ return -ENOMEM;
++ }
++
+ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
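
The mprotect_fixup() check above closes a side door: without it, a PROT_NONE region
sitting inside the gap could simply be made accessible after the fact. Re-running the
distance test whenever the new flags include any of VM_READ, VM_WRITE or VM_EXEC keeps
the gap meaningful. With toy numbers (hypothetical layout):

    #include <stdio.h>

    int main(void)
    {
        unsigned long gap        = 64 * 1024;
        unsigned long end        = 0x7ffee000UL;  /* end of the region being mprotected */
        unsigned long next_start = 0x7fff0000UL;  /* growsdown stack vma just above it */

        printf("%s\n", gap > next_start - end ? "-ENOMEM" : "ok");
        return 0;
    }
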
+@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
}
}
@@ -53661,7 +54276,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
/*
* First try to merge with previous and/or next vma.
*/
-@@ -195,9 +283,21 @@ success:
+@@ -195,9 +293,21 @@ success:
* vm_flags and vm_page_prot are protected by the mmap_sem
* held in write mode.
*/
@@ -53684,7 +54299,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
if (vma_wants_writenotify(vma)) {
vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-@@ -238,6 +338,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -238,6 +348,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
end = start + len;
if (end <= start)
return -ENOMEM;
@@ -53702,7 +54317,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
if (!arch_validate_prot(prot))
return -EINVAL;
-@@ -245,7 +356,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -245,7 +366,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
/*
* Does the application expect PROT_READ to imply PROT_EXEC:
*/
@@ -53711,7 +54326,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
-@@ -277,6 +388,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -277,6 +398,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
if (start > vma->vm_start)
prev = vma;
@@ -53728,7 +54343,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c
for (nstart = start ; ; ) {
unsigned long newflags;
-@@ -301,6 +422,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -301,6 +432,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
if (error)
goto out;
perf_event_mmap(vma);
@@ -53843,8 +54458,16 @@ diff -urNp linux-2.6.32.21/mm/mremap.c linux-2.6.32.21/mm/mremap.c
if (ret & ~PAGE_MASK)
diff -urNp linux-2.6.32.21/mm/nommu.c linux-2.6.32.21/mm/nommu.c
--- linux-2.6.32.21/mm/nommu.c 2010-08-29 21:08:20.000000000 -0400
-+++ linux-2.6.32.21/mm/nommu.c 2010-09-04 15:54:52.000000000 -0400
-@@ -761,15 +761,6 @@ struct vm_area_struct *find_vma(struct m
++++ linux-2.6.32.21/mm/nommu.c 2010-09-17 18:34:04.000000000 -0400
+@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+
+ atomic_long_t mmap_pages_allocated;
+
+@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
EXPORT_SYMBOL(find_vma);
/*
@@ -56492,7 +57115,7 @@ diff -urNp linux-2.6.32.21/security/integrity/ima/ima_queue.c linux-2.6.32.21/se
return 0;
diff -urNp linux-2.6.32.21/security/Kconfig linux-2.6.32.21/security/Kconfig
--- linux-2.6.32.21/security/Kconfig 2010-08-13 16:24:37.000000000 -0400
-+++ linux-2.6.32.21/security/Kconfig 2010-09-14 20:52:17.000000000 -0400
++++ linux-2.6.32.21/security/Kconfig 2010-09-17 17:39:35.000000000 -0400
@@ -4,6 +4,505 @@
menu "Security options"
@@ -56516,7 +57139,7 @@ diff -urNp linux-2.6.32.21/security/Kconfig linux-2.6.32.21/security/Kconfig
+
+config PAX
+ bool "Enable various PaX features"
-+ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
+ help
+ This allows you to enable various PaX features. PaX adds
+ intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.32/4425_grsec-pax-without-grsec.patch b/2.6.32/4425_grsec-pax-without-grsec.patch
index 18fa48e..578f33a 100644
--- a/2.6.32/4425_grsec-pax-without-grsec.patch
+++ b/2.6.32/4425_grsec-pax-without-grsec.patch
@@ -54,7 +54,7 @@ The original version of this patch contained no credits/description.
current->comm, task_pid_nr(current), current_uid(), current_euid());
print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
@@ -1838,10 +1842,12 @@
- #ifdef CONFIG_PAX_USERCOPY
+
void pax_report_leak_to_user(const void *ptr, unsigned long len)
{
+#ifdef CONFIG_GRKERNSEC
@@ -82,11 +82,11 @@ The original version of this patch contained no credits/description.
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -23,7 +23,7 @@
-
+
config PAX
bool "Enable various PaX features"
-- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
-+ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
+- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
++ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
help
This allows you to enable various PaX features. PaX adds
intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.32/4430_grsec-kconfig-default-gids.patch b/2.6.32/4430_grsec-kconfig-default-gids.patch
index b7a0413..7ba8aa2 100644
--- a/2.6.32/4430_grsec-kconfig-default-gids.patch
+++ b/2.6.32/4430_grsec-kconfig-default-gids.patch
@@ -29,25 +29,25 @@ from shooting themselves in the foot.
config GRKERNSEC_EXECLOG
bool "Exec logging"
-@@ -780,7 +780,7 @@
+@@ -785,7 +785,7 @@
config GRKERNSEC_TPE_GID
int "GID for untrusted users"
depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
- default 1005
+ default 100
help
- If you have selected the "Invert GID option" above, setting this
- GID determines what group TPE restrictions will be *disabled* for.
-@@ -792,7 +792,7 @@
+ Setting this GID determines what group TPE restrictions will be
+ *enabled* for. If the sysctl option is enabled, a sysctl option
+@@ -794,7 +794,7 @@
config GRKERNSEC_TPE_GID
int "GID for trusted users"
depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
- default 1005
+ default 10
help
- If you have selected the "Invert GID option" above, setting this
- GID determines what group TPE restrictions will be *disabled* for.
-@@ -866,7 +866,7 @@
+ Setting this GID determines what group TPE restrictions will be
+ *disabled* for. If the sysctl option is enabled, a sysctl option
+@@ -865,7 +865,7 @@
config GRKERNSEC_SOCKET_ALL_GID
int "GID to deny all sockets for"
depends on GRKERNSEC_SOCKET_ALL
@@ -56,7 +56,7 @@ from shooting themselves in the foot.
help
Here you can choose the GID to disable socket access for. Remember to
add the users you want socket access disabled for to the GID
-@@ -887,7 +887,7 @@
+@@ -886,7 +886,7 @@
config GRKERNSEC_SOCKET_CLIENT_GID
int "GID to deny client sockets for"
depends on GRKERNSEC_SOCKET_CLIENT
@@ -65,7 +65,7 @@ from shooting themselves in the foot.
help
Here you can choose the GID to disable client socket access for.
Remember to add the users you want client socket access disabled for to
-@@ -905,7 +905,7 @@
+@@ -904,7 +904,7 @@
config GRKERNSEC_SOCKET_SERVER_GID
int "GID to deny server sockets for"
depends on GRKERNSEC_SOCKET_SERVER
diff --git a/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch b/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch
index 9c7f7be..aa2403a 100644
--- a/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch
+++ b/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch
@@ -21,7 +21,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
--- a/grsecurity/Kconfig
+++ b/grsecurity/Kconfig
-@@ -1372,6 +1372,27 @@
+@@ -1371,6 +1371,27 @@
menu "Logging Options"
depends on GRKERNSEC