author     Anthony G. Basile <blueness@gentoo.org>  2014-09-03 08:00:36 -0400
committer  Anthony G. Basile <blueness@gentoo.org>  2014-09-03 08:00:36 -0400
commit     68d46906563236bdfaebc58465bcc47ca7388a77 (patch)
tree       21082a94ef0eb5ec1427af555f7f2efaa72d159b
parent     Grsec/PaX: 3.0-{3.2.62,3.14.17,3.16.1}-201409010104 (diff)
download   hardened-patchset-68d46906563236bdfaebc58465bcc47ca7388a77.tar.gz
           hardened-patchset-68d46906563236bdfaebc58465bcc47ca7388a77.tar.bz2
           hardened-patchset-68d46906563236bdfaebc58465bcc47ca7388a77.zip
Grsec/PaX: 3.0-{3.14.17,3.16.1}-201409021826
-rw-r--r--  3.14.17/0000_README                                                                                      2
-rw-r--r--  3.14.17/4420_grsecurity-3.0-3.14.17-201409021816.patch (renamed from 3.14.17/4420_grsecurity-3.0-3.14.17-201408312006.patch)   40
-rw-r--r--  3.16.1/0000_README                                                                                       2
-rw-r--r--  3.16.1/4420_grsecurity-3.0-3.16.1-201409021826.patch (renamed from 3.16.1/4420_grsecurity-3.0-3.16.1-201409010104.patch)      187
4 files changed, 184 insertions, 47 deletions
diff --git a/3.14.17/0000_README b/3.14.17/0000_README
index 99b0d3a..19f254f 100644
--- a/3.14.17/0000_README
+++ b/3.14.17/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.17-201408312006.patch
+Patch: 4420_grsecurity-3.0-3.14.17-201409021816.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.17/4420_grsecurity-3.0-3.14.17-201408312006.patch b/3.14.17/4420_grsecurity-3.0-3.14.17-201409021816.patch
index 5fb863b..7887ba7 100644
--- a/3.14.17/4420_grsecurity-3.0-3.14.17-201408312006.patch
+++ b/3.14.17/4420_grsecurity-3.0-3.14.17-201409021816.patch
@@ -58165,10 +58165,33 @@ index ebaff36..7e3ea26 100644
kunmap(page);
file_end_write(file);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
-index 5e0982a..b7e82bc 100644
+index 5e0982a..ca18377 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
-@@ -248,7 +248,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
+@@ -128,6 +128,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx)
+ struct dentry *dentry, *last;
+ struct ceph_dentry_info *di;
+ int err = 0;
++ char d_name[DNAME_INLINE_LEN];
++ const unsigned char *name;
+
+ /* claim ref on last dentry we returned */
+ last = fi->dentry;
+@@ -183,7 +185,12 @@ more:
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+ ctx->pos = di->offset;
+- if (!dir_emit(ctx, dentry->d_name.name,
++ name = dentry->d_name.name;
++ if (name == dentry->d_iname) {
++ memcpy(d_name, name, dentry->d_name.len);
++ name = d_name;
++ }
++ if (!dir_emit(ctx, name,
+ dentry->d_name.len,
+ ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
+ dentry->d_inode->i_mode >> 12)) {
+@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned frag = fpos_frag(ctx->pos);
@@ -96341,6 +96364,19 @@ index a2a54a8..43ecb68 100644
EXPORT_SYMBOL_GPL(pcpu_base_addr);
static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index a8b9199..dfb79e0 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t entry = *pmdp;
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
+- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
++ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index fd26d04..0cea1b0 100644
--- a/mm/process_vm_access.c
diff --git a/3.16.1/0000_README b/3.16.1/0000_README
index 76ef299..7a2bc49 100644
--- a/3.16.1/0000_README
+++ b/3.16.1/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.16.1-201409010104.patch
+Patch: 4420_grsecurity-3.0-3.16.1-201409021826.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.16.1/4420_grsecurity-3.0-3.16.1-201409010104.patch b/3.16.1/4420_grsecurity-3.0-3.16.1-201409021826.patch
index 6753168..624c5fb 100644
--- a/3.16.1/4420_grsecurity-3.0-3.16.1-201409010104.patch
+++ b/3.16.1/4420_grsecurity-3.0-3.16.1-201409021826.patch
@@ -3775,7 +3775,7 @@ index 7bcee5c..e2f3249 100644
__data_loc = .;
#endif
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index 3c82b37..bd41745 100644
+index 3c82b37..69fa3d2 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
@@ -3814,6 +3814,15 @@ index 3c82b37..bd41745 100644
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
+@@ -1034,7 +1034,7 @@ static void check_kvm_target_cpu(void *ret)
+ /**
+ * Initialize Hyp-mode and memory mappings on all CPUs.
+ */
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int err;
+ int ret, cpu;
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 14a0d98..7771a7d 100644
--- a/arch/arm/lib/clear_user.S
@@ -7597,6 +7606,19 @@ index 51706d6..ec1178c 100644
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index f3c56a1..6a2f01c 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -841,7 +841,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+ return r;
+ }
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int ret;
+
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42b..9e43d4b 100644
--- a/arch/mips/mm/fault.c
@@ -18130,7 +18152,7 @@ index 81bb91b..9392125 100644
/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 0ec0560..5dc64bd 100644
+index 0ec0560..f169e5b 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
@@ -18193,7 +18215,23 @@ index 0ec0560..5dc64bd 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -150,6 +190,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+@@ -131,8 +171,13 @@ static inline int pte_exec(pte_t pte)
+
+ static inline int pte_special(pte_t pte)
+ {
+- return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) ==
+- (_PAGE_PRESENT|_PAGE_SPECIAL);
++ /*
++ * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
++ * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
++ * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
++ */
++ return (pte_flags(pte) & _PAGE_SPECIAL) &&
++ (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
+ }
+
+ static inline unsigned long pte_pfn(pte_t pte)
+@@ -150,6 +195,11 @@ static inline unsigned long pud_pfn(pud_t pud)
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
@@ -18205,7 +18243,7 @@ index 0ec0560..5dc64bd 100644
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
-@@ -203,9 +248,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -203,9 +253,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
return pte_clear_flags(pte, _PAGE_RW);
}
@@ -18236,7 +18274,7 @@ index 0ec0560..5dc64bd 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -435,6 +500,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -435,6 +505,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif
#ifndef __ASSEMBLY__
@@ -18253,7 +18291,7 @@ index 0ec0560..5dc64bd 100644
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
-@@ -581,7 +656,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+@@ -581,7 +661,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18262,7 +18300,7 @@ index 0ec0560..5dc64bd 100644
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-@@ -621,7 +696,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+@@ -621,7 +701,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18271,7 +18309,7 @@ index 0ec0560..5dc64bd 100644
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
-@@ -636,7 +711,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -636,7 +716,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
static inline int pgd_bad(pgd_t pgd)
{
@@ -18280,7 +18318,7 @@ index 0ec0560..5dc64bd 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -659,7 +734,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -659,7 +739,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -18294,7 +18332,7 @@ index 0ec0560..5dc64bd 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -670,6 +750,23 @@ static inline int pgd_none(pgd_t pgd)
+@@ -670,6 +755,23 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -18318,7 +18356,7 @@ index 0ec0560..5dc64bd 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -836,11 +933,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+@@ -836,11 +938,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -40781,7 +40819,7 @@ index dd3a78c..386d49c 100644
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
-index de6da95..a2e72c0 100644
+index de6da95..c98278b 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -21,6 +21,7 @@
@@ -40797,7 +40835,7 @@ index de6da95..a2e72c0 100644
}
-static struct clk_ops clk_pll_ops = {
-+static struct clk_ops_no_const clk_pll_ops __read_only = {
++static clk_ops_no_const clk_pll_ops __read_only = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
};
@@ -60802,10 +60840,33 @@ index 4b1fb5c..0d2a699 100644
kunmap(page);
file_end_write(file);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
-index c29d6ae..a56c4ae 100644
+index c29d6ae..719b9bb 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
-@@ -250,7 +250,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
+@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
+ struct dentry *dentry, *last;
+ struct ceph_dentry_info *di;
+ int err = 0;
++ char d_name[DNAME_INLINE_LEN];
++ const unsigned char *name;
+
+ /* claim ref on last dentry we returned */
+ last = fi->dentry;
+@@ -192,7 +194,12 @@ more:
+
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+- if (!dir_emit(ctx, dentry->d_name.name,
++ name = dentry->d_name.name;
++ if (name == dentry->d_iname) {
++ memcpy(d_name, name, dentry->d_name.len);
++ name = d_name;
++ }
++ if (!dir_emit(ctx, name,
+ dentry->d_name.len,
+ ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
+ dentry->d_inode->i_mode >> 12)) {
+@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned frag = fpos_frag(ctx->pos);
@@ -99455,7 +99516,7 @@ index a013bc9..a897a14 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 8b44f76..66f1954 100644
+index 8b44f76..babeaec 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -413,6 +413,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -99492,7 +99553,34 @@ index 8b44f76..66f1954 100644
}
/*
-@@ -1501,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -751,7 +757,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn = pte_pfn(pte);
+
+ if (HAVE_PTE_SPECIAL) {
+- if (likely(!pte_special(pte) || pte_numa(pte)))
++ if (likely(!pte_special(pte)))
+ goto check_pfn;
+ if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ return NULL;
+@@ -777,15 +783,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ }
+ }
+
++ if (is_zero_pfn(pfn))
++ return NULL;
+ check_pfn:
+ if (unlikely(pfn > highest_memmap_pfn)) {
+ print_bad_pte(vma, addr, pte, NULL);
+ return NULL;
+ }
+
+- if (is_zero_pfn(pfn))
+- return NULL;
+-
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+@@ -1501,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -99503,7 +99591,7 @@ index 8b44f76..66f1954 100644
retval = 0;
pte_unmap_unlock(pte, ptl);
return retval;
-@@ -1545,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -1545,9 +1554,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -99525,7 +99613,7 @@ index 8b44f76..66f1954 100644
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
-@@ -1630,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -1630,6 +1651,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -99533,7 +99621,7 @@ index 8b44f76..66f1954 100644
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
-@@ -1877,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1877,7 +1899,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
@@ -99544,7 +99632,7 @@ index 8b44f76..66f1954 100644
if (!pmd)
return -ENOMEM;
do {
-@@ -1897,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -1897,7 +1921,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long next;
int err;
@@ -99555,7 +99643,7 @@ index 8b44f76..66f1954 100644
if (!pud)
return -ENOMEM;
do {
-@@ -2019,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+@@ -2019,6 +2045,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
return ret;
}
@@ -99742,7 +99830,7 @@ index 8b44f76..66f1954 100644
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
-@@ -2216,6 +2423,12 @@ gotten:
+@@ -2216,6 +2422,12 @@ gotten:
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
@@ -99755,7 +99843,7 @@ index 8b44f76..66f1954 100644
if (old_page) {
if (!PageAnon(old_page)) {
dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2267,6 +2480,10 @@ gotten:
+@@ -2267,6 +2479,10 @@ gotten:
page_remove_rmap(old_page);
}
@@ -99766,7 +99854,7 @@ index 8b44f76..66f1954 100644
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
-@@ -2540,6 +2757,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2540,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
@@ -99778,7 +99866,7 @@ index 8b44f76..66f1954 100644
unlock_page(page);
if (page != swapcache) {
/*
-@@ -2563,6 +2785,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2563,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -99790,7 +99878,7 @@ index 8b44f76..66f1954 100644
unlock:
pte_unmap_unlock(page_table, ptl);
out:
-@@ -2582,40 +2809,6 @@ out_release:
+@@ -2582,40 +2808,6 @@ out_release:
}
/*
@@ -99831,7 +99919,7 @@ index 8b44f76..66f1954 100644
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2624,27 +2816,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
unsigned int flags)
{
@@ -99864,7 +99952,7 @@ index 8b44f76..66f1954 100644
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2668,6 +2856,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
@@ -99876,7 +99964,7 @@ index 8b44f76..66f1954 100644
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
setpte:
-@@ -2675,6 +2869,12 @@ setpte:
+@@ -2675,6 +2868,12 @@ setpte:
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, address, page_table);
@@ -99889,7 +99977,7 @@ index 8b44f76..66f1954 100644
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
-@@ -2906,6 +3106,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2906,6 +3105,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, false, false);
@@ -99901,7 +99989,7 @@ index 8b44f76..66f1954 100644
unlock_page(fault_page);
unlock_out:
pte_unmap_unlock(pte, ptl);
-@@ -2947,7 +3152,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2947,7 +3151,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(fault_page);
goto uncharge_out;
}
@@ -99920,7 +100008,7 @@ index 8b44f76..66f1954 100644
pte_unmap_unlock(pte, ptl);
unlock_page(fault_page);
page_cache_release(fault_page);
-@@ -2995,6 +3211,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2995,6 +3210,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
do_set_pte(vma, address, fault_page, pte, true, false);
@@ -99932,7 +100020,7 @@ index 8b44f76..66f1954 100644
pte_unmap_unlock(pte, ptl);
if (set_page_dirty(fault_page))
-@@ -3225,6 +3446,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+@@ -3225,6 +3445,12 @@ static int handle_pte_fault(struct mm_struct *mm,
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
@@ -99945,7 +100033,7 @@ index 8b44f76..66f1954 100644
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
-@@ -3241,9 +3468,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3241,9 +3467,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *pte;
@@ -99987,7 +100075,7 @@ index 8b44f76..66f1954 100644
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
-@@ -3371,6 +3630,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3371,6 +3629,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -100011,7 +100099,7 @@ index 8b44f76..66f1954 100644
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
-@@ -3401,6 +3677,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3401,6 +3676,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -100042,7 +100130,7 @@ index 8b44f76..66f1954 100644
#endif /* __PAGETABLE_PMD_FOLDED */
#if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3414,7 +3714,7 @@ static int __init gate_vma_init(void)
+@@ -3414,7 +3713,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -100051,7 +100139,7 @@ index 8b44f76..66f1954 100644
return 0;
}
-@@ -3548,8 +3848,8 @@ out:
+@@ -3548,8 +3847,8 @@ out:
return ret;
}
@@ -100062,7 +100150,7 @@ index 8b44f76..66f1954 100644
{
resource_size_t phys_addr;
unsigned long prot = 0;
-@@ -3575,8 +3875,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
+@@ -3575,8 +3874,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* Access another process' address space as given in mm. If non-NULL, use the
* given task for page fault accounting.
*/
@@ -100073,7 +100161,7 @@ index 8b44f76..66f1954 100644
{
struct vm_area_struct *vma;
void *old_buf = buf;
-@@ -3584,7 +3884,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3584,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
while (len) {
@@ -100082,7 +100170,7 @@ index 8b44f76..66f1954 100644
void *maddr;
struct page *page = NULL;
-@@ -3643,8 +3943,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3643,8 +3942,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*
* The caller must hold a reference on @mm.
*/
@@ -100093,7 +100181,7 @@ index 8b44f76..66f1954 100644
{
return __access_remote_vm(NULL, mm, addr, buf, len, write);
}
-@@ -3654,11 +3954,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3654,11 +3953,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Source/target buffer must be kernel space,
* Do not walk the page table directly, use get_user_pages
*/
@@ -102063,6 +102151,19 @@ index 2ddf9a9..f8fc075 100644
EXPORT_SYMBOL_GPL(pcpu_base_addr);
static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
+diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
+index a8b9199..dfb79e0 100644
+--- a/mm/pgtable-generic.c
++++ b/mm/pgtable-generic.c
+@@ -195,7 +195,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t entry = *pmdp;
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
+- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
++ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 5077afc..846c9ef 100644
--- a/mm/process_vm_access.c