author    Anthony G. Basile <blueness@gentoo.org>  2015-08-24 07:29:58 -0400
committer Anthony G. Basile <blueness@gentoo.org>  2015-08-24 07:29:58 -0400
commit    9994874d540fa5cffaf5a44f90cd65d770fae08d (patch)
tree      0e641a995f864298904309b89fba544aad3ea47f
parent    grsecurity-{3.14.51,4.1.6}-201508181953 (diff)
download  hardened-patchset-9994874d540fa5cffaf5a44f90cd65d770fae08d.tar.gz
          hardened-patchset-9994874d540fa5cffaf5a44f90cd65d770fae08d.tar.bz2
          hardened-patchset-9994874d540fa5cffaf5a44f90cd65d770fae08d.zip

grsecurity-3.1-4.1.6-201508230818

-rw-r--r--  4.1.6/0000_README                                   |   2
-rw-r--r--  4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch  | 330
            (renamed from 4.1.6/4420_grsecurity-3.1-4.1.6-201508181953.patch)
2 files changed, 293 insertions, 39 deletions
diff --git a/4.1.6/0000_README b/4.1.6/0000_README
index ddf2d35..fe455ba 100644
--- a/4.1.6/0000_README
+++ b/4.1.6/0000_README
@@ -6,7 +6,7 @@ Patch: 1005_linux-4.1.6.patch
From: http://www.kernel.org
Desc: Linux 4.1.6
-Patch: 4420_grsecurity-3.1-4.1.6-201508181953.patch
+Patch: 4420_grsecurity-3.1-4.1.6-201508230818.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.1.6/4420_grsecurity-3.1-4.1.6-201508181953.patch b/4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch
index ddef976..61bc4c1 100644
--- a/4.1.6/4420_grsecurity-3.1-4.1.6-201508181953.patch
+++ b/4.1.6/4420_grsecurity-3.1-4.1.6-201508230818.patch
@@ -999,6 +999,20 @@ index 45df48b..952017a 100644
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 985227c..8acc029 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -304,6 +304,9 @@ INSTALL_TARGETS = zinstall uinstall install
+
+ PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
++bootpImage uImage: zImage
++zImage: Image
++
+ $(BOOT_TARGETS): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..abe7041 100644
--- a/arch/arm/include/asm/atomic.h
@@ -19223,10 +19237,39 @@ index 70bbe39..4ae2bd4 100644
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
-index 751bf4b..a1278b5 100644
+index 751bf4b..3cc39f1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
-@@ -112,7 +112,7 @@ do { \
+@@ -79,12 +79,12 @@ do { \
+ #else /* CONFIG_X86_32 */
+
+ /* frame pointer must be last for get_wchan */
+-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
++#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+ #define __EXTRA_CLOBBER \
+ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+- "r12", "r13", "r14", "r15", "flags"
++ "r12", "r13", "r14", "r15"
+
+ #ifdef CONFIG_CC_STACKPROTECTOR
+ #define __switch_canary \
+@@ -100,11 +100,7 @@ do { \
+ #define __switch_canary_iparam
+ #endif /* CC_STACKPROTECTOR */
+
+-/*
+- * There is no need to save or restore flags, because flags are always
+- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
+- * has no effect.
+- */
++/* Save restore flags to clear handle leaking NT */
+ #define switch_to(prev, next, last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+@@ -112,7 +108,7 @@ do { \
"call __switch_to\n\t" \
"movq "__percpu_arg([current_task])",%%rsi\n\t" \
__switch_canary \
@@ -19235,7 +19278,7 @@ index 751bf4b..a1278b5 100644
"movq %%rax,%%rdi\n\t" \
"testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
"jnz ret_from_fork\n\t" \
-@@ -123,7 +123,7 @@ do { \
+@@ -123,7 +119,7 @@ do { \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[_tif_fork] "i" (_TIF_FORK), \
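
The switch_to hunk above is the upstream fix for the leaking-NT problem: the deleted comment assumed flags are always clean in kernel mode, but a task can be scheduled out with NT (and potentially other flags) set, leaking them into the next task, so the asm now brackets the context switch with pushf/popf. Note the matching removal of "flags" from __EXTRA_CLOBBER: once the asm preserves flags itself, the compiler no longer has to treat them as clobbered. A minimal user-space sketch of the same push/pop idiom, assuming x86-64 and GCC/Clang inline asm (this is not the kernel macro, just an illustration):

    #include <stdio.h>

    /* Read RFLAGS with the push/pop pattern the patch adds around
     * the context switch. */
    static unsigned long read_flags(void)
    {
        unsigned long flags;
        asm volatile("pushfq ; popq %0" : "=r"(flags));
        return flags;
    }

    int main(void)
    {
        unsigned long flags = read_flags();
        /* NT is bit 14 of RFLAGS, the flag named in the new comment. */
        printf("RFLAGS=%#lx NT=%lu\n", flags, (flags >> 14) & 1);
        return 0;
    }
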
@@ -48751,6 +48794,19 @@ index dce5f7b..2433466 100644
#include "ftmac100.h"
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+index c754b20..c9da1b5 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
+
+ static inline bool fm10k_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b772..250fe69 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48764,6 +48820,32 @@ index a92b772..250fe69 100644
smp_mb(); /* Force the above update. */
}
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index a0a9b1f..3fe93e7 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6584,7 +6584,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+
+ static inline bool igb_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 5be12a0..463ff47 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
+
+ static inline bool ixgbe_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index e5ba040..d47531c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -48777,6 +48859,19 @@ index e5ba040..d47531c 100644
smp_mb();
/* need lock to prevent incorrect read while modifying cyclecounter */
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index e71cdde..1d7b00b 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+
+ static inline bool ixgbevf_page_is_reserved(struct page *page)
+ {
+- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+
+ /**
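
The same one-line substitution appears in all four Intel drivers above (fm10k, igb, ixgbe, ixgbevf): their *_page_is_reserved() helpers decide whether an RX page may be recycled, and a pfmemalloc page must not be, because it was handed out from emergency reserves. A hedged user-space model of that decision, with invented struct and field names (the real check uses page_to_nid(), numa_mem_id() and the new page_is_pfmemalloc() accessor):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for the kernel page and NUMA queries. */
    struct rx_page { int nid; bool pfmemalloc; };

    static bool page_is_reserved(const struct rx_page *p, int local_nid)
    {
        /* Don't recycle remote pages or emergency-reserve pages. */
        return p->nid != local_nid || p->pfmemalloc;
    }

    int main(void)
    {
        struct rx_page remote    = { .nid = 1, .pfmemalloc = false };
        struct rx_page emergency = { .nid = 0, .pfmemalloc = true  };
        printf("remote: %d, emergency: %d\n",
               page_is_reserved(&remote, 0),
               page_is_reserved(&emergency, 0));
        return 0;
    }

Going through an accessor instead of a raw field is what insulates these call sites from the struct page layout change made below in mm_types.h.
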
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 74d0389..086ac03 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
@@ -97837,7 +97932,7 @@ index 3d385c8..deacb6a 100644
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
diff --git a/include/linux/mm.h b/include/linux/mm.h
-index 0755b9f..2960e96 100644
+index 0755b9f..bf8eab1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
@@ -97871,7 +97966,42 @@ index 0755b9f..2960e96 100644
struct mmu_gather;
struct inode;
-@@ -1131,8 +1137,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+@@ -1002,6 +1008,34 @@ static inline int page_mapped(struct page *page)
+ }
+
+ /*
++ * Return true only if the page has been allocated with
++ * ALLOC_NO_WATERMARKS and the low watermark was not
++ * met implying that the system is under some pressure.
++ */
++static inline bool page_is_pfmemalloc(struct page *page)
++{
++ /*
++ * Page index cannot be this large so this must be
++ * a pfmemalloc page.
++ */
++ return page->index == -1UL;
++}
++
++/*
++ * Only to be called by the page allocator on a freshly allocated
++ * page.
++ */
++static inline void set_page_pfmemalloc(struct page *page)
++{
++ page->index = -1UL;
++}
++
++static inline void clear_page_pfmemalloc(struct page *page)
++{
++ page->index = 0;
++}
++
++/*
+ * Different kinds of faults, as returned by handle_mm_fault().
+ * Used to decide whether a process gets delivered SIGBUS or
+ * just gets major/minor fault counters bumped up.
+@@ -1131,8 +1165,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
@@ -97882,7 +98012,7 @@ index 0755b9f..2960e96 100644
static inline void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
-@@ -1172,9 +1178,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
+@@ -1172,9 +1206,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
@@ -97895,7 +98025,7 @@ index 0755b9f..2960e96 100644
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
-@@ -1219,34 +1225,6 @@ int clear_page_dirty_for_io(struct page *page);
+@@ -1219,34 +1253,6 @@ int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
@@ -97930,7 +98060,7 @@ index 0755b9f..2960e96 100644
extern struct task_struct *task_of_stack(struct task_struct *task,
struct vm_area_struct *vma, bool in_group);
-@@ -1369,8 +1347,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+@@ -1369,8 +1375,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
{
return 0;
}
@@ -97946,7 +98076,7 @@ index 0755b9f..2960e96 100644
#endif
#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-@@ -1380,6 +1365,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+@@ -1380,6 +1393,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
return 0;
}
@@ -97959,7 +98089,7 @@ index 0755b9f..2960e96 100644
static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
-@@ -1392,6 +1383,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+@@ -1392,6 +1411,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
@@ -97967,7 +98097,7 @@ index 0755b9f..2960e96 100644
static inline void mm_nr_pmds_init(struct mm_struct *mm)
{
-@@ -1429,11 +1421,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
+@@ -1429,11 +1449,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
NULL: pud_offset(pgd, address);
}
@@ -97991,7 +98121,7 @@ index 0755b9f..2960e96 100644
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-@@ -1810,12 +1814,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+@@ -1810,12 +1842,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
@@ -98015,7 +98145,7 @@ index 0755b9f..2960e96 100644
if (rlim < RLIM_INFINITY) {
if (((new - start) + (end_data - start_data)) > rlim)
return -ENOSPC;
-@@ -1840,7 +1855,7 @@ extern int install_special_mapping(struct mm_struct *mm,
+@@ -1840,7 +1883,7 @@ extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
@@ -98024,7 +98154,7 @@ index 0755b9f..2960e96 100644
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
-@@ -1848,6 +1863,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1848,6 +1891,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
@@ -98032,7 +98162,7 @@ index 0755b9f..2960e96 100644
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
-@@ -1876,10 +1892,11 @@ struct vm_unmapped_area_info {
+@@ -1876,10 +1920,11 @@ struct vm_unmapped_area_info {
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
@@ -98046,7 +98176,7 @@ index 0755b9f..2960e96 100644
/*
* Search for an unmapped address range.
-@@ -1891,7 +1908,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+@@ -1891,7 +1936,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
* - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
*/
static inline unsigned long
@@ -98055,7 +98185,7 @@ index 0755b9f..2960e96 100644
{
if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
return unmapped_area_topdown(info);
-@@ -1953,6 +1970,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
+@@ -1953,6 +1998,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
@@ -98066,7 +98196,7 @@ index 0755b9f..2960e96 100644
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1982,10 +2003,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+@@ -1982,10 +2031,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
}
#ifdef CONFIG_MMU
@@ -98079,7 +98209,7 @@ index 0755b9f..2960e96 100644
{
return __pgprot(0);
}
-@@ -2047,6 +2068,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+@@ -2047,6 +2096,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
static inline void vm_stat_account(struct mm_struct *mm,
unsigned long flags, struct file *file, long pages)
{
@@ -98091,7 +98221,7 @@ index 0755b9f..2960e96 100644
mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */
-@@ -2149,7 +2175,7 @@ extern int unpoison_memory(unsigned long pfn);
+@@ -2149,7 +2203,7 @@ extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
@@ -98100,7 +98230,7 @@ index 0755b9f..2960e96 100644
extern int soft_offline_page(struct page *page, int flags);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-@@ -2200,5 +2226,11 @@ void __init setup_nr_node_ids(void);
+@@ -2200,5 +2254,11 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif
@@ -98113,10 +98243,26 @@ index 0755b9f..2960e96 100644
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
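
The three helpers added to mm.h replace the dedicated bool with an encoding in page->index: -1UL can never be a valid offset within a mapping, so it is free to act as the pfmemalloc sentinel. A small user-space model of the round trip, assuming only the sentinel convention itself (struct page_model is invented for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    struct page_model { unsigned long index; };

    /* Mirrors of set_page_pfmemalloc(), clear_page_pfmemalloc()
     * and page_is_pfmemalloc() from the hunk above. */
    static void set_pfmemalloc(struct page_model *p)   { p->index = -1UL; }
    static void clear_pfmemalloc(struct page_model *p) { p->index = 0; }
    static bool is_pfmemalloc(const struct page_model *p)
    {
        return p->index == -1UL;  /* no real mapping offset is -1UL */
    }

    int main(void)
    {
        struct page_model p;
        set_pfmemalloc(&p);
        printf("after set:   %d\n", is_pfmemalloc(&p));
        clear_pfmemalloc(&p);
        printf("after clear: %d\n", is_pfmemalloc(&p));
        return 0;
    }

The scheme only works because the allocator side (the prep_new_page hunk in mm/page_alloc.c further down) runs exactly one of the two setters on every freshly allocated page, so readers never see a stale index from the page's previous life.
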
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 8d37e26..6a6f55b 100644
+index 8d37e26..29c54c9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
-@@ -313,7 +313,9 @@ struct vm_area_struct {
+@@ -63,15 +63,6 @@ struct page {
+ union {
+ pgoff_t index; /* Our offset within mapping. */
+ void *freelist; /* sl[aou]b first free object */
+- bool pfmemalloc; /* If set by the page allocator,
+- * ALLOC_NO_WATERMARKS was set
+- * and the low watermark was not
+- * met implying that the system
+- * is under some pressure. The
+- * caller should try ensure
+- * this page is only used to
+- * free other pages.
+- */
+ };
+
+ union {
+@@ -313,7 +304,9 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
@@ -98127,7 +98273,7 @@ index 8d37e26..6a6f55b 100644
struct core_thread {
struct task_struct *task;
-@@ -466,7 +468,25 @@ struct mm_struct {
+@@ -466,7 +459,25 @@ struct mm_struct {
/* address of the bounds directory */
void __user *bd_addr;
#endif
@@ -99784,7 +99930,7 @@ index ab1e039..ad4229e 100644
static inline void disallow_signal(int sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index f15154a..72cf02c 100644
+index f15154a..17b985a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -776,7 +776,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
@@ -99796,7 +99942,32 @@ index f15154a..72cf02c 100644
gfp_t priority)
{
return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
-@@ -1971,7 +1971,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+@@ -1590,20 +1590,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /*
+- * Propagate page->pfmemalloc to the skb if we can. The problem is
+- * that not all callers have unique ownership of the page. If
+- * pfmemalloc is set, we check the mapping as a mapping implies
+- * page->index is set (index and pfmemalloc share space).
+- * If it's a valid mapping, we cannot use page->pfmemalloc but we
+- * do not lose pfmemalloc information as the pages would not be
+- * allocated using __GFP_MEMALLOC.
++ * Propagate page pfmemalloc to the skb if we can. The problem is
++ * that not all callers have unique ownership of the page but rely
++ * on page_is_pfmemalloc doing the right thing(tm).
+ */
+ frag->page.p = page;
+ frag->page_offset = off;
+ skb_frag_size_set(frag, size);
+
+ page = compound_head(page);
+- if (page->pfmemalloc && !page->mapping)
++ if (page_is_pfmemalloc(page))
+ skb->pfmemalloc = true;
+ }
+
+@@ -1971,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
return skb->inner_transport_header - skb->inner_network_header;
}
@@ -99805,7 +99976,7 @@ index f15154a..72cf02c 100644
{
return skb_network_header(skb) - skb->data;
}
-@@ -2031,7 +2031,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+@@ -2031,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
@@ -99814,7 +99985,16 @@ index f15154a..72cf02c 100644
#endif
int ___pskb_trim(struct sk_buff *skb, unsigned int len);
-@@ -2673,9 +2673,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+@@ -2250,7 +2246,7 @@ static inline struct page *dev_alloc_page(void)
+ static inline void skb_propagate_pfmemalloc(struct page *page,
+ struct sk_buff *skb)
+ {
+- if (page && page->pfmemalloc)
++ if (page_is_pfmemalloc(page))
+ skb->pfmemalloc = true;
+ }
+
+@@ -2673,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
@@ -99826,7 +100006,7 @@ index f15154a..72cf02c 100644
struct msghdr *msg, int size)
{
return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
-@@ -3197,6 +3197,9 @@ static inline void nf_reset(struct sk_buff *skb)
+@@ -3197,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
#endif
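
The skbuff.h hunks implement a sticky OR: the moment any attached page is a pfmemalloc page, the whole skb is marked, and the shortened comment records that page_is_pfmemalloc() now performs the disambiguation the old mapping check existed for. A toy model of the propagation rule, with struct and function names invented for the sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct buf { bool pfmemalloc; int nr_frags; };

    static void attach_frag(struct buf *b, bool page_pfmemalloc)
    {
        b->nr_frags++;
        if (page_pfmemalloc)   /* sticky OR, as in __skb_fill_page_desc */
            b->pfmemalloc = true;
    }

    int main(void)
    {
        struct buf b = { 0 };
        attach_frag(&b, false);
        attach_frag(&b, true);   /* one emergency page taints the buffer */
        attach_frag(&b, false);
        printf("frags=%d pfmemalloc=%d\n", b.nr_frags, b.pfmemalloc);
        return 0;
    }
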
@@ -113722,7 +113902,7 @@ index eb59f7e..b23a2a8 100644
unsigned long bg_thresh,
unsigned long dirty,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index ebffa0e..c61160a 100644
+index ebffa0e..a5ae7f7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@@ -113827,7 +114007,25 @@ index ebffa0e..c61160a 100644
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
-@@ -1649,6 +1689,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
+@@ -983,12 +1023,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+ set_page_owner(page, order, gfp_flags);
+
+ /*
+- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
++ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
+ * allocate the page. The expectation is that the caller is taking
+ * steps that will free more memory. The caller should avoid the page
+ * being used for !PFMEMALLOC purposes.
+ */
+- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
++ if (alloc_flags & ALLOC_NO_WATERMARKS)
++ set_page_pfmemalloc(page);
++ else
++ clear_page_pfmemalloc(page);
+
+ return 0;
+ }
+@@ -1649,6 +1692,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
zone->free_area[order].nr_free--;
rmv_page_order(page);
@@ -113836,7 +114034,7 @@ index ebffa0e..c61160a 100644
/* Set the pageblock if the isolated page is at least a pageblock */
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
-@@ -1660,7 +1702,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
+@@ -1660,7 +1705,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
}
@@ -113845,7 +114043,7 @@ index ebffa0e..c61160a 100644
return 1UL << order;
}
-@@ -1749,7 +1791,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -1749,7 +1794,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
@@ -113854,7 +114052,7 @@ index ebffa0e..c61160a 100644
!test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-@@ -2068,7 +2110,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
+@@ -2068,7 +2113,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
do {
mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -113863,7 +114061,7 @@ index ebffa0e..c61160a 100644
clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
} while (zone++ != preferred_zone);
}
-@@ -5781,7 +5823,7 @@ static void __setup_per_zone_wmarks(void)
+@@ -5781,7 +5826,7 @@ static void __setup_per_zone_wmarks(void)
__mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) - low_wmark_pages(zone) -
@@ -114101,7 +114299,7 @@ index 47d536e..8321b4e 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 7eb38dd..5dee2c4 100644
+index 7eb38dd..0451459 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -114148,6 +114346,24 @@ index 7eb38dd..5dee2c4 100644
slab_state = PARTIAL_NODE;
slab_early_init = 0;
+@@ -1602,7 +1606,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ }
+
+ /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
+- if (unlikely(page->pfmemalloc))
++ if (page_is_pfmemalloc(page))
+ pfmemalloc_active = true;
+
+ nr_pages = (1 << cachep->gfporder);
+@@ -1613,7 +1617,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ add_zone_page_state(page_zone(page),
+ NR_SLAB_UNRECLAIMABLE, nr_pages);
+ __SetPageSlab(page);
+- if (page->pfmemalloc)
++ if (page_is_pfmemalloc(page))
+ SetPageSlabPfmemalloc(page);
+
+ if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
@@ -2073,7 +2077,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
cachep = find_mergeable(size, align, flags, name, ctor);
@@ -114993,7 +115209,7 @@ index 4765f65..f17284d 100644
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
-index 54c0876..9fb1661 100644
+index 54c0876..61847f8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -198,7 +198,7 @@ struct track {
@@ -115014,6 +115230,15 @@ index 54c0876..9fb1661 100644
s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
{
+@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+ inc_slabs_node(s, page_to_nid(page), page->objects);
+ page->slab_cache = s;
+ __SetPageSlab(page);
+- if (page->pfmemalloc)
++ if (page_is_pfmemalloc(page))
+ SetPageSlabPfmemalloc(page);
+
+ start = page_address(page);
@@ -2707,6 +2707,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
slab_free_hook(s, x);
@@ -115904,6 +116129,26 @@ index c92b52f..006c052 100644
.kind = "vlan",
.maxtype = IFLA_VLAN_MAX,
.policy = vlan_policy,
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 81925b9..fcf6fe0 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
+ struct p9_client *clnt = fid->clnt;
+ struct p9_req_t *req;
+ int total = 0;
++ *err = 0;
+
+ p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+ fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
+@@ -1616,6 +1617,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ struct p9_client *clnt = fid->clnt;
+ struct p9_req_t *req;
+ int total = 0;
++ *err = 0;
+
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
+ fid->fid, (unsigned long long) offset,
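
These two one-line 9p fixes matter because p9_client_read() and p9_client_write() only assign *err on failure paths; without the added initialization, a caller passing an uninitialized int could see stack garbage reported as an error after a fully successful transfer. A self-contained sketch of the same out-parameter convention and its failure mode (copy_all() is an invented stand-in, not a 9p function):

    #include <stdio.h>

    /* Returns bytes copied; sets *err only when something fails,
     * so it must zero *err on entry -- the line the patch adds. */
    static int copy_all(const char *src, char *dst, int n, int *err)
    {
        int total = 0;
        *err = 0;                      /* the fix */
        for (int i = 0; i < n; i++) {
            if (src[i] == '\0') {      /* model an I/O failure */
                *err = -1;
                break;
            }
            dst[i] = src[i];
            total++;
        }
        return total;
    }

    int main(void)
    {
        char out[8];
        int err = 12345;               /* garbage, as on a real stack */
        int n = copy_all("abc", out, 3, &err);
        printf("copied %d, err %d\n", n, err);
        return 0;
    }
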
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 6ab36ae..6f1841b 100644
--- a/net/9p/mod.c
@@ -117281,9 +117526,18 @@ index 3b6899b..cf36238 100644
{
struct socket *sock;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index 41ec022..3cc0a1c 100644
+index 41ec022..89b1df7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
+@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+
+ if (skb && frag_size) {
+ skb->head_frag = 1;
+- if (virt_to_head_page(data)->pfmemalloc)
++ if (page_is_pfmemalloc(virt_to_head_page(data)))
+ skb->pfmemalloc = 1;
+ }
+ return skb;
@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)