author Anthony G. Basile <blueness@gentoo.org> 2014-09-29 07:56:06 -0400
committer Anthony G. Basile <blueness@gentoo.org> 2014-09-29 07:56:33 -0400
commit 667ea5760ac409dee4a40d992131bc42a7cd6c60 (patch)
tree 8096d4abc044018dcb045ba06be03b371e3d240b
parent Grsec/PaX: 3.0-{3.2.63,3.14.19,3.16.3}-201409180901 (diff)
download hardened-patchset-667ea5760ac409dee4a40d992131bc42a7cd6c60.tar.gz
hardened-patchset-667ea5760ac409dee4a40d992131bc42a7cd6c60.tar.bz2
hardened-patchset-667ea5760ac409dee4a40d992131bc42a7cd6c60.zip
Grsec/PaX: 3.0-{3.2.63,3.14.19,3.16.3}-201409282025 (tag: 20140928)
-rw-r--r-- 3.14.19/0000_README 2
-rw-r--r-- 3.14.19/4420_grsecurity-3.0-3.14.19-201409282024.patch (renamed from 3.14.19/4420_grsecurity-3.0-3.14.19-201409180900.patch) 765
-rw-r--r-- 3.16.3/0000_README 2
-rw-r--r-- 3.16.3/4420_grsecurity-3.0-3.16.3-201409282025.patch (renamed from 3.16.3/4420_grsecurity-3.0-3.16.3-201409180901.patch) 1174
-rw-r--r-- 3.2.63/0000_README 2
-rw-r--r-- 3.2.63/4420_grsecurity-3.0-3.2.63-201409282020.patch (renamed from 3.2.63/4420_grsecurity-3.0-3.2.63-201409180857.patch) 263
6 files changed, 1987 insertions, 221 deletions
diff --git a/3.14.19/0000_README b/3.14.19/0000_README
index d825e9a..56f5a2f 100644
--- a/3.14.19/0000_README
+++ b/3.14.19/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.19-201409180900.patch
+Patch: 4420_grsecurity-3.0-3.14.19-201409282024.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.19/4420_grsecurity-3.0-3.14.19-201409180900.patch b/3.14.19/4420_grsecurity-3.0-3.14.19-201409282024.patch
index 3cb0c39..6d97454 100644
--- a/3.14.19/4420_grsecurity-3.0-3.14.19-201409180900.patch
+++ b/3.14.19/4420_grsecurity-3.0-3.14.19-201409282024.patch
@@ -2166,6 +2166,95 @@ index 71a06b2..8bb9ae1 100644
/*
* Change these and you break ASM code in entry-common.S
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 83259b8..8c7e01d 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -1,6 +1,9 @@
+ #ifndef __ASMARM_TLS_H
+ #define __ASMARM_TLS_H
+
++#include <linux/compiler.h>
++#include <asm/thread_info.h>
++
+ #ifdef __ASSEMBLY__
+ #include <asm/asm-offsets.h>
+ .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
+@@ -50,6 +53,50 @@
+ #endif
+
+ #ifndef __ASSEMBLY__
++#include <asm/pgtable.h>
++
++static inline void set_tls(unsigned long val)
++{
++ struct thread_info *thread;
++
++ thread = current_thread_info();
++
++ thread->tp_value[0] = val;
++
++ /*
++ * This code runs with preemption enabled and therefore must
++ * be reentrant with respect to switch_tls.
++ *
++ * We need to ensure ordering between the shadow state and the
++ * hardware state, so that we don't corrupt the hardware state
++ * with a stale shadow state during context switch.
++ *
++ * If we're preempted here, switch_tls will load TPIDRURO from
++ * thread_info upon resuming execution and the following mcr
++ * is merely redundant.
++ */
++ barrier();
++
++ if (!tls_emu) {
++ if (has_tls_reg) {
++ asm("mcr p15, 0, %0, c13, c0, 3"
++ : : "r" (val));
++ } else {
++ /*
++ * User space must never try to access this
++ * directly. Expect your app to break
++ * eventually if you do so. The user helper
++ * at 0xffff0fe0 must be used instead. (see
++ * entry-armv.S for details)
++ */
++ pax_open_kernel();
++ *((unsigned int *)0xffff0ff0) = val;
++ pax_close_kernel();
++ }
++
++ }
++}
++
+ static inline unsigned long get_tpuser(void)
+ {
+ unsigned long reg = 0;
+@@ -59,5 +106,23 @@ static inline unsigned long get_tpuser(void)
+
+ return reg;
+ }
++
++static inline void set_tpuser(unsigned long val)
++{
++ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
++ * we need not update thread_info.
++ */
++ if (has_tls_reg && !tls_emu) {
++ asm("mcr p15, 0, %0, c13, c0, 2"
++ : : "r" (val));
++ }
++}
++
++static inline void flush_tls(void)
++{
++ set_tls(0);
++ set_tpuser(0);
++}
++
+ #endif
+ #endif /* __ASMARM_TLS_H */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7f3f3cc..bdf0665 100644
--- a/arch/arm/include/asm/uaccess.h
@@ -2841,7 +2930,7 @@ index 07314af..c46655c 100644
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 92f7b15..7048500 100644
+index 92f7b15..b5e6630 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -217,6 +217,7 @@ void machine_power_off(void)
@@ -2872,7 +2961,16 @@ index 92f7b15..7048500 100644
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -334,6 +335,8 @@ void flush_thread(void)
+ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+ memset(&thread->fpstate, 0, sizeof(union fp_state));
+
++ flush_tls();
++
+ thread_notify(THREAD_NOTIFY_FLUSH, thread);
+ }
+
+@@ -425,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -2885,7 +2983,7 @@ index 92f7b15..7048500 100644
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
-@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
+@@ -446,7 +443,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -2894,7 +2992,7 @@ index 92f7b15..7048500 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
+@@ -472,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
const char *arch_vma_name(struct vm_area_struct *vma)
{
@@ -3139,8 +3237,21 @@ index 7a3be1d..b00c7de 100644
pr_debug("CPU ITCM: copied code from %p - %p\n",
start, end);
itcm_present = true;
+diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
+index 7b8403b..80f0d69 100644
+--- a/arch/arm/kernel/thumbee.c
++++ b/arch/arm/kernel/thumbee.c
+@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+- thread->thumbee_state = 0;
++ teehbr_write(0);
+ break;
+ case THREAD_NOTIFY_SWITCH:
+ current_thread_info()->thumbee_state = teehbr_read();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 172ee18..ce4ec3d 100644
+index 172ee18..381ce44 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3171,17 +3282,38 @@ index 172ee18..ce4ec3d 100644
if (signr)
do_exit(signr);
}
-@@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
- * The user helper at 0xffff0fe0 must be used instead.
- * (see entry-armv.S for details)
- */
-+ pax_open_kernel();
- *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-+ pax_close_kernel();
- }
+@@ -578,7 +583,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
+ #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
+ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+ {
+- struct thread_info *thread = current_thread_info();
+ siginfo_t info;
+
+ if ((no >> 16) != (__ARM_NR_BASE>> 16))
+@@ -629,21 +633,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+ return regs->ARM_r0;
+
+ case NR(set_tls):
+- thread->tp_value[0] = regs->ARM_r0;
+- if (tls_emu)
+- return 0;
+- if (has_tls_reg) {
+- asm ("mcr p15, 0, %0, c13, c0, 3"
+- : : "r" (regs->ARM_r0));
+- } else {
+- /*
+- * User space must never try to access this directly.
+- * Expect your app to break eventually if you do so.
+- * The user helper at 0xffff0fe0 must be used instead.
+- * (see entry-armv.S for details)
+- */
+- *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+- }
++ set_tls(regs->ARM_r0);
return 0;
-@@ -899,7 +906,11 @@ void __init early_trap_init(void *vectors_base)
+ #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+@@ -899,7 +889,11 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
@@ -18010,10 +18142,10 @@ index ed5903b..c7fe163 100644
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index e22c1db..23a625a 100644
+index e22c1db..82f2923 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -16,10 +16,14 @@
+@@ -16,10 +16,15 @@
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
@@ -18026,11 +18158,12 @@ index e22c1db..23a625a 100644
-extern pmd_t level2_ident_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pmd_t level2_ident_pgt[512*2];
++extern pte_t level1_fixmap_pgt[512];
+extern pgd_t init_level4_pgt[512];
#define swapper_pg_dir init_level4_pgt
-@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -61,7 +66,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -18040,7 +18173,7 @@ index e22c1db..23a625a 100644
}
static inline void native_pmd_clear(pmd_t *pmd)
-@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+@@ -97,7 +104,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
@@ -18050,7 +18183,7 @@ index e22c1db..23a625a 100644
}
static inline void native_pud_clear(pud_t *pud)
-@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
+@@ -107,6 +116,13 @@ static inline void native_pud_clear(pud_t *pud)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -21775,7 +21908,7 @@ index f6dfd93..892ade4 100644
.__cr3 = __pa_nodebug(swapper_pg_dir),
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
-index d9c12d3..7858b62 100644
+index d9c12d3..3e70198 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -2,6 +2,9 @@
@@ -21788,7 +21921,15 @@ index d9c12d3..7858b62 100644
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
-@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
+@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
+
+ void printk_address(unsigned long address)
+ {
+- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
++ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
+ }
+
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
@@ -35857,7 +35998,7 @@ index 201d09a..e4723e5 100644
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 2423ef0..4f6fb5b 100644
+index 2423ef0..a5f0379 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
@@ -35869,17 +36010,63 @@ index 2423ef0..4f6fb5b 100644
{
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
-@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
+ *
+ * We can construct this by grafting the Xen provided pagetable into
+ * head_64.S's preconstructed pagetables. We copy the Xen L2's into
+- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
+- * means that only the kernel has a physical mapping to start with -
+- * but that's enough to get __va working. We need to fill in the rest
+- * of the physical mapping once some sort of allocator has been set
+- * up.
+- * NOTE: for PVH, the page tables are native.
++ * level2_ident_pgt, and level2_kernel_pgt. This means that only the
++ * kernel has a physical mapping to start with - but that's enough to
++ * get __va working. We need to fill in the rest of the physical
++ * mapping once some sort of allocator has been set up. NOTE: for
++ * PVH, the page tables are native.
+ */
+ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ {
+@@ -1902,8 +1901,14 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ /* L3_i[0] -> level2_ident_pgt */
+ convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt
- * L3_i[511] -> level2_fixmap_pgt */
+- * L3_i[511] -> level2_fixmap_pgt */
++ * L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
+ convert_pfn_mfn(level3_vmalloc_start_pgt);
+ convert_pfn_mfn(level3_vmalloc_end_pgt);
+ convert_pfn_mfn(level3_vmemmap_pgt);
++
++ /* L3_k[511][506] -> level1_fixmap_pgt */
++ convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
-@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+@@ -1913,30 +1918,29 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ addr[1] = (unsigned long)l3;
+ addr[2] = (unsigned long)l2;
+ /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
+- * Both L4[272][0] and L4[511][511] have entries that point to the same
++ * Both L4[272][0] and L4[511][510] have entries that point to the same
+ * L2 (PMD) tables. Meaning that if you modify it in __va space
+ * it will be also modified in the __ka space! (But if you just
+ * modify the PMD table to point to other PTE's or none, then you
+ * are OK - which is what cleanup_highmap does) */
+ copy_page(level2_ident_pgt, l2);
+- /* Graft it onto L4[511][511] */
++ /* Graft it onto L4[511][510] */
+ copy_page(level2_kernel_pgt, l2);
+
+- /* Get [511][510] and graft that in level2_fixmap_pgt */
+- l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
+- l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
+- copy_page(level2_fixmap_pgt, l2);
+- /* Note that we don't do anything with level1_fixmap_pgt which
+- * we don't need. */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
@@ -35891,8 +36078,11 @@ index 2423ef0..4f6fb5b 100644
+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
-@@ -2123,6 +2130,7 @@ static void __init xen_post_allocator_init(void)
+ /* Pin down new L4 */
+ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+@@ -2123,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
@@ -35900,7 +36090,7 @@ index 2423ef0..4f6fb5b 100644
#endif
/* This will work as long as patching hasn't happened yet
-@@ -2201,6 +2209,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+@@ -2201,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
@@ -36120,6 +36310,18 @@ index d8f80e7..5f41702 100644
done:
spin_lock_init(&blkcg->lock);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
+diff --git a/block/blk-exec.c b/block/blk-exec.c
+index dbf4502..3394b6e 100644
+--- a/block/blk-exec.c
++++ b/block/blk-exec.c
+@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ bool is_pm_resume;
+
+ WARN_ON(irqs_disabled());
++ WARN_ON(rq->cmd_type == REQ_TYPE_FS);
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 1855bf5..af12b06 100644
--- a/block/blk-iopoll.c
@@ -36146,6 +36348,28 @@ index ae4ae10..c470b8d 100644
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 883f720..37322f0 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -710,14 +710,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+- !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
+- blk_insert_flush(rq);
+- } else {
+- spin_lock(&ctx->lock);
+- __blk_mq_insert_request(hctx, rq, at_head);
+- spin_unlock(&ctx->lock);
+- }
++ spin_lock(&ctx->lock);
++ __blk_mq_insert_request(hctx, rq, at_head);
++ spin_unlock(&ctx->lock);
+
+ blk_mq_put_ctx(current_ctx);
+
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 57790c1..5e988dd 100644
--- a/block/blk-softirq.c
@@ -38097,8 +38321,21 @@ index be73e9d..7fbf140 100644
cmdlist_t *reqQ;
cmdlist_t *cmpQ;
+diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
+index 597f111..c700970 100644
+--- a/drivers/block/drbd/drbd_bitmap.c
++++ b/drivers/block/drbd/drbd_bitmap.c
+@@ -1042,7 +1042,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
+ submit_bio(rw, bio);
+ /* this should not count as user activity and cause the
+ * resync to throttle -- see drbd_rs_should_slow_down(). */
+- atomic_add(len >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(len >> 9, &mdev->rs_sect_ev);
+ }
+ }
+
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
-index 0e06f0c..c47b81d 100644
+index 0e06f0c..d98cde3 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -582,7 +582,7 @@ struct drbd_epoch {
@@ -38119,6 +38356,17 @@ index 0e06f0c..c47b81d 100644
unsigned int peer_seq;
spinlock_t peer_seq_lock;
unsigned int minor;
+@@ -1032,8 +1032,8 @@ struct drbd_conf {
+ struct mutex own_state_mutex;
+ struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
+ char congestion_reason; /* Why we where congested... */
+- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
+- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
++ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
++ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
+ int rs_last_sect_ev; /* counter to compare with */
+ int rs_last_events; /* counter of read or write "events" (unit sectors)
+ * on the lower level device when we last looked. */
@@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
char __user *uoptval;
int err;
@@ -38146,7 +38394,7 @@ index 89c497c..9c736ae 100644
/**
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index 929468e..7d934eb 100644
+index 929468e..efb12f0 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
@@ -38167,6 +38415,17 @@ index 929468e..7d934eb 100644
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
if (mdev->state.conn >= C_SYNC_SOURCE &&
mdev->state.conn <= C_PAUSED_SYNC_T)
+@@ -1886,8 +1886,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
+ atomic_set(&mdev->unacked_cnt, 0);
+ atomic_set(&mdev->local_cnt, 0);
+ atomic_set(&mdev->pp_in_use_by_net, 0);
+- atomic_set(&mdev->rs_sect_in, 0);
+- atomic_set(&mdev->rs_sect_ev, 0);
++ atomic_set_unchecked(&mdev->rs_sect_in, 0);
++ atomic_set_unchecked(&mdev->rs_sect_ev, 0);
+ atomic_set(&mdev->ap_in_flight, 0);
+ atomic_set(&mdev->md_io_in_use, 0);
+
@@ -2577,8 +2577,8 @@ void conn_destroy(struct kref *kref)
{
struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
@@ -38201,7 +38460,7 @@ index c706d50..5e1b472 100644
if (!msg)
goto failed;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
-index d073305..4998fea 100644
+index d073305..958be8f 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
@@ -38254,6 +38513,24 @@ index d073305..4998fea 100644
list_add(&epoch->list, &tconn->current_epoch->list);
tconn->current_epoch = epoch;
tconn->epochs++;
+@@ -1688,7 +1688,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
+ list_add(&peer_req->w.list, &mdev->sync_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+
+- atomic_add(data_size >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(data_size >> 9, &mdev->rs_sect_ev);
+ if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return 0;
+
+@@ -1782,7 +1782,7 @@ static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
+ }
+
+- atomic_add(pi->size >> 9, &mdev->rs_sect_in);
++ atomic_add_unchecked(pi->size >> 9, &mdev->rs_sect_in);
+
+ return err;
+ }
@@ -2164,7 +2164,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
err = wait_for_and_update_peer_seq(mdev, peer_seq);
@@ -38272,6 +38549,33 @@ index d073305..4998fea 100644
atomic_inc(&peer_req->epoch->active);
spin_unlock(&tconn->epoch_lock);
+@@ -2326,7 +2326,7 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
+
+ curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]) -
+- atomic_read(&mdev->rs_sect_ev);
++ atomic_read_unchecked(&mdev->rs_sect_ev);
+
+ if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
+ unsigned long rs_left;
+@@ -2459,7 +2459,7 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
+ mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ } else if (pi->cmd == P_OV_REPLY) {
+ /* track progress, we may need to throttle */
+- atomic_add(size >> 9, &mdev->rs_sect_in);
++ atomic_add_unchecked(size >> 9, &mdev->rs_sect_in);
+ peer_req->w.cb = w_e_end_ov_reply;
+ dec_rs_pending(mdev);
+ /* drbd_rs_begin_io done when we sent this request,
+@@ -2520,7 +2520,7 @@ static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
+ goto out_free_e;
+
+ submit_for_resync:
+- atomic_add(size >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(size >> 9, &mdev->rs_sect_ev);
+
+ submit:
+ inc_unacked(mdev);
@@ -4345,7 +4345,7 @@ struct data_cmd {
int expect_payload;
size_t pkt_size;
@@ -38290,6 +38594,15 @@ index d073305..4998fea 100644
tconn->send.seen_any_write_yet = false;
conn_info(tconn, "Connection closed\n");
+@@ -4947,7 +4947,7 @@ static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
+ put_ldev(mdev);
+ }
+ dec_rs_pending(mdev);
+- atomic_add(blksize >> 9, &mdev->rs_sect_in);
++ atomic_add_unchecked(blksize >> 9, &mdev->rs_sect_in);
+
+ return 0;
+ }
@@ -5221,7 +5221,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
struct asender_cmd {
size_t pkt_size;
@@ -38299,6 +38612,39 @@ index d073305..4998fea 100644
static struct asender_cmd asender_tbl[] = {
[P_PING] = { 0, got_Ping },
+diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
+index 84d3175..ccea188 100644
+--- a/drivers/block/drbd/drbd_worker.c
++++ b/drivers/block/drbd/drbd_worker.c
+@@ -400,7 +400,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
+ list_add(&peer_req->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->tconn->req_lock);
+
+- atomic_add(size >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(size >> 9, &mdev->rs_sect_ev);
+ if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+ return 0;
+
+@@ -498,7 +498,7 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
+ int max_sect;
+ struct fifo_buffer *plan;
+
+- sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
++ sect_in = atomic_xchg_unchecked(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
+ mdev->rs_in_flight -= sect_in;
+
+ dc = rcu_dereference(mdev->ldev->disk_conf);
+@@ -1561,8 +1561,8 @@ void drbd_rs_controller_reset(struct drbd_conf *mdev)
+ {
+ struct fifo_buffer *plan;
+
+- atomic_set(&mdev->rs_sect_in, 0);
+- atomic_set(&mdev->rs_sect_ev, 0);
++ atomic_set_unchecked(&mdev->rs_sect_in, 0);
++ atomic_set_unchecked(&mdev->rs_sect_ev, 0);
+ mdev->rs_in_flight = 0;
+
+ /* Updating the RCU protected object in place is necessary since
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 66e8c3b..9b68dd9 100644
--- a/drivers/block/loop.c
@@ -50165,6 +50511,32 @@ index 1b3a094..068e683 100644
}
}
EXPORT_SYMBOL(fc_exch_update_stats);
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 4046241..4549986 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ return NULL;
+ }
+
++ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
++ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
++ return NULL;
++ }
++
+ task = conn->login_task;
+ } else {
+ if (session->state != ISCSI_STATE_LOGGED_IN)
+ return NULL;
+
++ if (data_size != 0) {
++ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
++ return NULL;
++ }
++
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index d289583..b745eec 100644
--- a/drivers/scsi/libsas/sas_ata.c
@@ -51070,6 +51442,19 @@ index 52b7731..d604da0 100644
op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
strlen(filename), mode, LUSTRE_OPC_MKDIR,
lump);
+diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
+index 6cfdb9e..1ddab59 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
++++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
+@@ -576,7 +576,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
+ if (sb->s_root == NULL) {
+ CERROR("%s: can't make root dentry\n",
+ ll_get_fsname(sb, NULL, 0));
+- GOTO(out_root, err = -ENOMEM);
++ GOTO(out_lock_cn_cb, err = -ENOMEM);
+ }
+
+ /* kernel >= 2.6.38 store dentry operations in sb->s_d_op. */
diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
index 480b7c4..6846324 100644
--- a/drivers/staging/media/solo6x10/solo6x10-core.c
@@ -51284,7 +51669,7 @@ index d07fcb5..358e1e1 100644
return;
}
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
-index 6eecd53..29317c6 100644
+index 6eecd53..1025c8b 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
@@ -51313,6 +51698,16 @@ index 6eecd53..29317c6 100644
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
pDevice->apdev->type = ARPHRD_IEEE80211;
+@@ -385,6 +386,9 @@ static int hostap_set_generic_element(PSDevice pDevice,
+ {
+ PSMgmtObject pMgmt = pDevice->pMgmt;
+
++ if (param->u.generic_elem.len > sizeof(pMgmt->abyWPAIE))
++ return -EINVAL;
++
+ memcpy(pMgmt->abyWPAIE,
+ param->u.generic_elem.data,
+ param->u.generic_elem.len
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index 67ba48b..24e602f 100644
--- a/drivers/staging/vt6656/hostap.c
@@ -58387,10 +58782,30 @@ index ff286f3..8153a14 100644
.attrs = attrs,
};
diff --git a/fs/buffer.c b/fs/buffer.c
-index 27265a8..289f488 100644
+index 27265a8..8673b7b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -3428,7 +3428,7 @@ void __init buffer_init(void)
+@@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ bh = page_buffers(page);
+ if (bh->b_size == size) {
+ end_block = init_page_buffers(page, bdev,
+- index << sizebits, size);
++ (sector_t)index << sizebits,
++ size);
+ goto done;
+ }
+ if (!try_to_free_buffers(page))
+@@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ */
+ spin_lock(&inode->i_mapping->private_lock);
+ link_dev_buffers(page, bh);
+- end_block = init_page_buffers(page, bdev, index << sizebits, size);
++ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
++ size);
+ spin_unlock(&inode->i_mapping->private_lock);
+ done:
+ ret = (block < end_block) ? 1 : -ENXIO;
+@@ -3428,7 +3430,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -62726,7 +63141,7 @@ index b29e42f..5ea7fdf 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index d5a4fae..d221b37 100644
+index d5a4fae..27e6c48 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,34 @@ int generic_permission(struct inode *inode, int mask)
@@ -62782,7 +63197,42 @@ index d5a4fae..d221b37 100644
return -EACCES;
}
-@@ -823,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -642,24 +651,22 @@ static int complete_walk(struct nameidata *nd)
+
+ static __always_inline void set_root(struct nameidata *nd)
+ {
+- if (!nd->root.mnt)
+- get_fs_root(current->fs, &nd->root);
++ get_fs_root(current->fs, &nd->root);
+ }
+
+ static int link_path_walk(const char *, struct nameidata *);
+
+-static __always_inline void set_root_rcu(struct nameidata *nd)
++static __always_inline unsigned set_root_rcu(struct nameidata *nd)
+ {
+- if (!nd->root.mnt) {
+- struct fs_struct *fs = current->fs;
+- unsigned seq;
++ struct fs_struct *fs = current->fs;
++ unsigned seq, res;
+
+- do {
+- seq = read_seqcount_begin(&fs->seq);
+- nd->root = fs->root;
+- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
+- } while (read_seqcount_retry(&fs->seq, seq));
+- }
++ do {
++ seq = read_seqcount_begin(&fs->seq);
++ nd->root = fs->root;
++ res = __read_seqcount_begin(&nd->root.dentry->d_seq);
++ } while (read_seqcount_retry(&fs->seq, seq));
++ return res;
+ }
+
+ static void path_put_conditional(struct path *path, struct nameidata *nd)
+@@ -823,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
{
struct dentry *dentry = link->dentry;
int error;
@@ -62791,7 +63241,7 @@ index d5a4fae..d221b37 100644
BUG_ON(nd->flags & LOOKUP_RCU);
-@@ -844,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -844,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
if (error)
goto out_put_nd_path;
@@ -62804,7 +63254,68 @@ index d5a4fae..d221b37 100644
nd->last_type = LAST_BIND;
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(*p);
-@@ -1592,6 +1607,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -859,7 +872,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+ return PTR_ERR(s);
+ }
+ if (*s == '/') {
+- set_root(nd);
++ if (!nd->root.mnt)
++ set_root(nd);
+ path_put(&nd->path);
+ nd->path = nd->root;
+ path_get(&nd->root);
+@@ -1132,7 +1146,9 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+
+ static int follow_dotdot_rcu(struct nameidata *nd)
+ {
+- set_root_rcu(nd);
++ struct inode *inode = nd->inode;
++ if (!nd->root.mnt)
++ set_root_rcu(nd);
+
+ while (1) {
+ if (nd->path.dentry == nd->root.dentry &&
+@@ -1144,6 +1160,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ struct dentry *parent = old->d_parent;
+ unsigned seq;
+
++ inode = parent->d_inode;
+ seq = read_seqcount_begin(&parent->d_seq);
+ if (read_seqcount_retry(&old->d_seq, nd->seq))
+ goto failed;
+@@ -1153,6 +1170,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ }
+ if (!follow_up_rcu(&nd->path))
+ break;
++ inode = nd->path.dentry->d_inode;
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+ }
+ while (d_mountpoint(nd->path.dentry)) {
+@@ -1162,11 +1180,12 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ break;
+ nd->path.mnt = &mounted->mnt;
+ nd->path.dentry = mounted->mnt.mnt_root;
++ inode = nd->path.dentry->d_inode;
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+ if (!read_seqretry(&mount_lock, nd->m_seq))
+ goto failed;
+ }
+- nd->inode = nd->path.dentry->d_inode;
++ nd->inode = inode;
+ return 0;
+
+ failed:
+@@ -1244,7 +1263,8 @@ static void follow_mount(struct path *path)
+
+ static void follow_dotdot(struct nameidata *nd)
+ {
+- set_root(nd);
++ if (!nd->root.mnt)
++ set_root(nd);
+
+ while(1) {
+ struct dentry *old = nd->path.dentry;
+@@ -1592,6 +1612,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (res)
break;
res = walk_component(nd, path, LOOKUP_FOLLOW);
@@ -62813,7 +63324,7 @@ index d5a4fae..d221b37 100644
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1664,7 +1681,7 @@ EXPORT_SYMBOL(full_name_hash);
+@@ -1664,7 +1686,7 @@ EXPORT_SYMBOL(full_name_hash);
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
unsigned long a, b, adata, bdata, mask, hash, len;
@@ -62822,7 +63333,32 @@ index d5a4fae..d221b37 100644
hash = a = 0;
len = -sizeof(unsigned long);
-@@ -1948,6 +1965,8 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1842,7 +1864,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+ if (*name=='/') {
+ if (flags & LOOKUP_RCU) {
+ rcu_read_lock();
+- set_root_rcu(nd);
++ nd->seq = set_root_rcu(nd);
+ } else {
+ set_root(nd);
+ path_get(&nd->root);
+@@ -1893,7 +1915,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+ }
+
+ nd->inode = nd->path.dentry->d_inode;
+- return 0;
++ if (!(flags & LOOKUP_RCU))
++ return 0;
++ if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
++ return 0;
++ if (!(nd->flags & LOOKUP_ROOT))
++ nd->root.mnt = NULL;
++ rcu_read_unlock();
++ return -ECHILD;
+ }
+
+ static inline int lookup_last(struct nameidata *nd, struct path *path)
+@@ -1948,6 +1977,8 @@ static int path_lookupat(int dfd, const char *name,
if (err)
break;
err = lookup_last(nd, &path);
@@ -62831,7 +63367,7 @@ index d5a4fae..d221b37 100644
put_link(nd, &link, cookie);
}
}
-@@ -1955,6 +1974,13 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1955,6 +1986,13 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -62845,7 +63381,7 @@ index d5a4fae..d221b37 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!d_can_lookup(nd->path.dentry)) {
path_put(&nd->path);
-@@ -1982,8 +2008,15 @@ static int filename_lookup(int dfd, struct filename *name,
+@@ -1982,8 +2020,15 @@ static int filename_lookup(int dfd, struct filename *name,
retval = path_lookupat(dfd, name->name,
flags | LOOKUP_REVAL, nd);
@@ -62862,7 +63398,7 @@ index d5a4fae..d221b37 100644
return retval;
}
-@@ -2558,6 +2591,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2558,6 +2603,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -62876,7 +63412,7 @@ index d5a4fae..d221b37 100644
return 0;
}
-@@ -2789,7 +2829,7 @@ looked_up:
+@@ -2789,7 +2841,7 @@ looked_up:
* cleared otherwise prior to returning.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
@@ -62885,7 +63421,7 @@ index d5a4fae..d221b37 100644
const struct open_flags *op,
bool got_write, int *opened)
{
-@@ -2824,6 +2864,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2824,6 +2876,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
@@ -62903,7 +63439,7 @@ index d5a4fae..d221b37 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2845,6 +2896,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2845,6 +2908,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -62912,7 +63448,7 @@ index d5a4fae..d221b37 100644
}
out_no_open:
path->dentry = dentry;
-@@ -2859,7 +2912,7 @@ out_dput:
+@@ -2859,7 +2924,7 @@ out_dput:
/*
* Handle the last step of open()
*/
@@ -62921,7 +63457,7 @@ index d5a4fae..d221b37 100644
struct file *file, const struct open_flags *op,
int *opened, struct filename *name)
{
-@@ -2909,6 +2962,15 @@ static int do_last(struct nameidata *nd, struct path *path,
+@@ -2909,6 +2974,15 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
@@ -62937,7 +63473,7 @@ index d5a4fae..d221b37 100644
audit_inode(name, dir, LOOKUP_PARENT);
error = -EISDIR;
/* trailing slashes? */
-@@ -2928,7 +2990,7 @@ retry_lookup:
+@@ -2928,7 +3002,7 @@ retry_lookup:
*/
}
mutex_lock(&dir->d_inode->i_mutex);
@@ -62946,7 +63482,7 @@ index d5a4fae..d221b37 100644
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
-@@ -2952,11 +3014,28 @@ retry_lookup:
+@@ -2952,11 +3026,28 @@ retry_lookup:
goto finish_open_created;
}
@@ -62976,7 +63512,7 @@ index d5a4fae..d221b37 100644
/*
* If atomic_open() acquired write access it is dropped now due to
-@@ -2997,6 +3076,11 @@ finish_lookup:
+@@ -2997,6 +3088,11 @@ finish_lookup:
}
}
BUG_ON(inode != path->dentry->d_inode);
@@ -62988,7 +63524,7 @@ index d5a4fae..d221b37 100644
return 1;
}
-@@ -3006,7 +3090,6 @@ finish_lookup:
+@@ -3006,7 +3102,6 @@ finish_lookup:
save_parent.dentry = nd->path.dentry;
save_parent.mnt = mntget(path->mnt);
nd->path.dentry = path->dentry;
@@ -62996,7 +63532,7 @@ index d5a4fae..d221b37 100644
}
nd->inode = inode;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
-@@ -3016,7 +3099,18 @@ finish_open:
+@@ -3016,7 +3111,18 @@ finish_open:
path_put(&save_parent);
return error;
}
@@ -63015,7 +63551,7 @@ index d5a4fae..d221b37 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3179,7 +3273,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3179,7 +3285,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(error))
goto out;
@@ -63024,7 +63560,7 @@ index d5a4fae..d221b37 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3197,7 +3291,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3197,7 +3303,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -63033,7 +63569,7 @@ index d5a4fae..d221b37 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3297,9 +3391,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+@@ -3297,9 +3403,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
goto unlock;
error = -EEXIST;
@@ -63047,7 +63583,7 @@ index d5a4fae..d221b37 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3351,6 +3447,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3351,6 +3459,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -63068,7 +63604,7 @@ index d5a4fae..d221b37 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3413,6 +3523,17 @@ retry:
+@@ -3413,6 +3535,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -63086,7 +63622,7 @@ index d5a4fae..d221b37 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3429,6 +3550,8 @@ retry:
+@@ -3429,6 +3562,8 @@ retry:
break;
}
out:
@@ -63095,7 +63631,7 @@ index d5a4fae..d221b37 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3481,9 +3604,16 @@ retry:
+@@ -3481,9 +3616,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -63112,7 +63648,7 @@ index d5a4fae..d221b37 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3564,6 +3694,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3564,6 +3706,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -63121,7 +63657,7 @@ index d5a4fae..d221b37 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3596,10 +3728,21 @@ retry:
+@@ -3596,10 +3740,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -63143,7 +63679,7 @@ index d5a4fae..d221b37 100644
exit3:
dput(dentry);
exit2:
-@@ -3689,6 +3832,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3689,6 +3844,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -63152,7 +63688,7 @@ index d5a4fae..d221b37 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3715,10 +3860,22 @@ retry_deleg:
+@@ -3715,10 +3872,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -63175,7 +63711,7 @@ index d5a4fae..d221b37 100644
exit2:
dput(dentry);
}
-@@ -3806,9 +3963,17 @@ retry:
+@@ -3806,9 +3975,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -63193,7 +63729,7 @@ index d5a4fae..d221b37 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3911,6 +4076,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3911,6 +4088,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -63201,7 +63737,7 @@ index d5a4fae..d221b37 100644
int how = 0;
int error;
-@@ -3934,7 +4100,7 @@ retry:
+@@ -3934,7 +4112,7 @@ retry:
if (error)
return error;
@@ -63210,7 +63746,7 @@ index d5a4fae..d221b37 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -3946,11 +4112,28 @@ retry:
+@@ -3946,11 +4124,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -63239,7 +63775,7 @@ index d5a4fae..d221b37 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4237,6 +4420,12 @@ retry_deleg:
+@@ -4237,6 +4432,12 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -63252,7 +63788,7 @@ index d5a4fae..d221b37 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry);
if (error)
-@@ -4244,6 +4433,9 @@ retry_deleg:
+@@ -4244,6 +4445,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode);
@@ -63262,7 +63798,7 @@ index d5a4fae..d221b37 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4280,6 +4472,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -4280,6 +4484,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
@@ -63271,7 +63807,7 @@ index d5a4fae..d221b37 100644
int len;
len = PTR_ERR(link);
-@@ -4289,7 +4483,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+@@ -4289,7 +4495,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
@@ -97308,7 +97844,7 @@ index cdbd312..2e1e0b9 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index ff85863..6aa94ab 100644
+index ff85863..7037c25 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -33,7 +33,7 @@
@@ -97329,7 +97865,19 @@ index ff85863..6aa94ab 100644
/*
* shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -2298,6 +2298,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2143,8 +2143,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
+
+ if (new_dentry->d_inode) {
+ (void) shmem_unlink(new_dir, new_dentry);
+- if (they_are_dirs)
++ if (they_are_dirs) {
++ drop_nlink(new_dentry->d_inode);
+ drop_nlink(old_dir);
++ }
+ } else if (they_are_dirs) {
+ drop_nlink(old_dir);
+ inc_nlink(new_dir);
+@@ -2298,6 +2300,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -97341,7 +97889,7 @@ index ff85863..6aa94ab 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2353,6 +2358,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2353,6 +2360,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -97357,7 +97905,7 @@ index ff85863..6aa94ab 100644
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
-@@ -2665,8 +2679,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2665,8 +2681,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -97368,7 +97916,7 @@ index ff85863..6aa94ab 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 6dd8d5f..2482a6d 100644
+index 6dd8d5f..673c763 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -97421,7 +97969,32 @@ index 6dd8d5f..2482a6d 100644
slab_early_init = 0;
-@@ -3484,6 +3488,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -2189,7 +2193,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
+ int
+ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ {
+- size_t left_over, freelist_size, ralign;
++ size_t left_over, freelist_size;
++ size_t ralign = BYTES_PER_WORD;
+ gfp_t gfp;
+ int err;
+ size_t size = cachep->size;
+@@ -2222,14 +2227,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ size &= ~(BYTES_PER_WORD - 1);
+ }
+
+- /*
+- * Redzoning and user store require word alignment or possibly larger.
+- * Note this will be overridden by architecture or caller mandated
+- * alignment if either is greater than BYTES_PER_WORD.
+- */
+- if (flags & SLAB_STORE_USER)
+- ralign = BYTES_PER_WORD;
+-
+ if (flags & SLAB_RED_ZONE) {
+ ralign = REDZONE_ALIGN;
+ /* If redzoning, ensure that the second redzone is suitably
+@@ -3484,6 +3481,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
@@ -97443,7 +98016,7 @@ index 6dd8d5f..2482a6d 100644
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
-@@ -3712,6 +3731,7 @@ void kfree(const void *objp)
+@@ -3712,6 +3724,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -97451,7 +98024,7 @@ index 6dd8d5f..2482a6d 100644
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
-@@ -4153,14 +4173,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+@@ -4153,14 +4166,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
}
/* cpu stats */
{
@@ -97478,7 +98051,7 @@ index 6dd8d5f..2482a6d 100644
#endif
}
-@@ -4381,13 +4409,69 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4381,13 +4402,69 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -105460,11 +106033,11 @@ index 078fe1d..fbdb363 100644
fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
new file mode 100644
-index 0000000..3fd3699
+index 0000000..42018ed
--- /dev/null
+++ b/scripts/gcc-plugin.sh
-@@ -0,0 +1,43 @@
-+#!/bin/bash
+@@ -0,0 +1,51 @@
++#!/bin/sh
+srctree=$(dirname "$0")
+gccplugins_dir=$($3 -print-file-name=plugin)
+plugincc=$($1 -E - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
@@ -105482,15 +106055,23 @@ index 0000000..3fd3699
+ exit 1
+fi
+
-+if [[ "$plugincc" =~ "$1 CC" ]]
-+then
-+ echo "$1"
-+ exit 0
-+fi
++case "$plugincc" in
++ *"$1 CC"*)
++ echo "$1"
++ exit 0
++ ;;
+
-+if [[ "$plugincc" =~ "$2 CXX" ]]
-+then
-+plugincc=$($1 -c -x c++ -std=gnu++98 - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
++ *"$2 CXX"*)
++ # the c++ compiler needs another test, see below
++ ;;
++
++ *)
++ exit 1
++ ;;
++esac
++
++# we need a c++ compiler that supports the designated initializer GNU extension
++plugincc=$($2 -c -x c++ -std=gnu++98 - -fsyntax-only -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
+#include "gcc-common.h"
+class test {
+public:
@@ -105500,12 +106081,12 @@ index 0000000..3fd3699
+};
+EOF
+)
++
+if [ $? -eq 0 ]
+then
+ echo "$2"
+ exit 0
+fi
-+fi
+exit 1
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 5de5660..d3deb89 100644
diff --git a/3.16.3/0000_README b/3.16.3/0000_README
index 05a4f78..47d2ef9 100644
--- a/3.16.3/0000_README
+++ b/3.16.3/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.16.3-201409180901.patch
+Patch: 4420_grsecurity-3.0-3.16.3-201409282025.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.16.3/4420_grsecurity-3.0-3.16.3-201409180901.patch b/3.16.3/4420_grsecurity-3.0-3.16.3-201409282025.patch
index a8197ee..9207cde 100644
--- a/3.16.3/4420_grsecurity-3.0-3.16.3-201409180901.patch
+++ b/3.16.3/4420_grsecurity-3.0-3.16.3-201409282025.patch
@@ -234,6 +234,20 @@ index 9de9813..1462492 100644
zconf.hash.c
+zconf.lex.c
zoffset.h
+diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
+index a1d0d7a..61d65cc 100644
+--- a/Documentation/filesystems/vfs.txt
++++ b/Documentation/filesystems/vfs.txt
+@@ -1053,7 +1053,8 @@ struct dentry_operations {
+ If the 'rcu_walk' parameter is true, then the caller is doing a
+ pathwalk in RCU-walk mode. Sleeping is not permitted in this mode,
+ and the caller can be asked to leave it and call again by returning
+- -ECHILD.
++ -ECHILD. -EISDIR may also be returned to tell pathwalk to
++ ignore d_automount or any mounts.
+
+ This function is only used if DCACHE_MANAGE_TRANSIT is set on the
+ dentry being transited from.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b7fa2f5..90cd9f8 100644
--- a/Documentation/kernel-parameters.txt
@@ -2700,6 +2714,95 @@ index e4e4208..086684a 100644
/*
* Change these and you break ASM code in entry-common.S
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 83259b8..8c7e01d 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -1,6 +1,9 @@
+ #ifndef __ASMARM_TLS_H
+ #define __ASMARM_TLS_H
+
++#include <linux/compiler.h>
++#include <asm/thread_info.h>
++
+ #ifdef __ASSEMBLY__
+ #include <asm/asm-offsets.h>
+ .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
+@@ -50,6 +53,50 @@
+ #endif
+
+ #ifndef __ASSEMBLY__
++#include <asm/pgtable.h>
++
++static inline void set_tls(unsigned long val)
++{
++ struct thread_info *thread;
++
++ thread = current_thread_info();
++
++ thread->tp_value[0] = val;
++
++ /*
++ * This code runs with preemption enabled and therefore must
++ * be reentrant with respect to switch_tls.
++ *
++ * We need to ensure ordering between the shadow state and the
++ * hardware state, so that we don't corrupt the hardware state
++ * with a stale shadow state during context switch.
++ *
++ * If we're preempted here, switch_tls will load TPIDRURO from
++ * thread_info upon resuming execution and the following mcr
++ * is merely redundant.
++ */
++ barrier();
++
++ if (!tls_emu) {
++ if (has_tls_reg) {
++ asm("mcr p15, 0, %0, c13, c0, 3"
++ : : "r" (val));
++ } else {
++ /*
++ * User space must never try to access this
++ * directly. Expect your app to break
++ * eventually if you do so. The user helper
++ * at 0xffff0fe0 must be used instead. (see
++ * entry-armv.S for details)
++ */
++ pax_open_kernel();
++ *((unsigned int *)0xffff0ff0) = val;
++ pax_close_kernel();
++ }
++
++ }
++}
++
+ static inline unsigned long get_tpuser(void)
+ {
+ unsigned long reg = 0;
+@@ -59,5 +106,23 @@ static inline unsigned long get_tpuser(void)
+
+ return reg;
+ }
++
++static inline void set_tpuser(unsigned long val)
++{
++ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
++ * we need not update thread_info.
++ */
++ if (has_tls_reg && !tls_emu) {
++ asm("mcr p15, 0, %0, c13, c0, 2"
++ : : "r" (val));
++ }
++}
++
++static inline void flush_tls(void)
++{
++ set_tls(0);
++ set_tpuser(0);
++}
++
+ #endif
+ #endif /* __ASMARM_TLS_H */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 75d9579..b5b40e4 100644
--- a/arch/arm/include/asm/uaccess.h
@@ -3375,7 +3478,7 @@ index 07314af..c46655c 100644
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 81ef686..f4130b8 100644
+index 81ef686..7af43a0 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -212,6 +212,7 @@ void machine_power_off(void)
@@ -3406,7 +3509,16 @@ index 81ef686..f4130b8 100644
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -334,6 +335,8 @@ void flush_thread(void)
+ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+ memset(&thread->fpstate, 0, sizeof(union fp_state));
+
++ flush_tls();
++
+ thread_notify(THREAD_NOTIFY_FLUSH, thread);
+ }
+
+@@ -425,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -3419,7 +3531,7 @@ index 81ef686..f4130b8 100644
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
-@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
+@@ -446,7 +443,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -3428,7 +3540,7 @@ index 81ef686..f4130b8 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
+@@ -472,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
const char *arch_vma_name(struct vm_area_struct *vma)
{
@@ -3652,8 +3764,21 @@ index 7a3be1d..b00c7de 100644
pr_debug("CPU ITCM: copied code from %p - %p\n",
start, end);
itcm_present = true;
+diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
+index 7b8403b..80f0d69 100644
+--- a/arch/arm/kernel/thumbee.c
++++ b/arch/arm/kernel/thumbee.c
+@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+- thread->thumbee_state = 0;
++ teehbr_write(0);
+ break;
+ case THREAD_NOTIFY_SWITCH:
+ current_thread_info()->thumbee_state = teehbr_read();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index abd2fc0..895dbb6 100644
+index abd2fc0..1e2696e 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3684,17 +3809,38 @@ index abd2fc0..895dbb6 100644
if (signr)
do_exit(signr);
}
-@@ -643,7 +648,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
- * The user helper at 0xffff0fe0 must be used instead.
- * (see entry-armv.S for details)
- */
-+ pax_open_kernel();
- *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-+ pax_close_kernel();
- }
+@@ -579,7 +584,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
+ #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
+ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+ {
+- struct thread_info *thread = current_thread_info();
+ siginfo_t info;
+
+ if ((no >> 16) != (__ARM_NR_BASE>> 16))
+@@ -630,21 +634,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
+ return regs->ARM_r0;
+
+ case NR(set_tls):
+- thread->tp_value[0] = regs->ARM_r0;
+- if (tls_emu)
+- return 0;
+- if (has_tls_reg) {
+- asm ("mcr p15, 0, %0, c13, c0, 3"
+- : : "r" (regs->ARM_r0));
+- } else {
+- /*
+- * User space must never try to access this directly.
+- * Expect your app to break eventually if you do so.
+- * The user helper at 0xffff0fe0 must be used instead.
+- * (see entry-armv.S for details)
+- */
+- *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
+- }
++ set_tls(regs->ARM_r0);
return 0;
-@@ -900,7 +907,11 @@ void __init early_trap_init(void *vectors_base)
+ #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+@@ -900,7 +890,11 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
@@ -18499,10 +18645,10 @@ index ed5903b..c7fe163 100644
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 5be9063..d62185b 100644
+index 5be9063..0c42843 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -16,10 +16,14 @@
+@@ -16,10 +16,15 @@
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
@@ -18515,11 +18661,12 @@ index 5be9063..d62185b 100644
-extern pmd_t level2_ident_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pmd_t level2_ident_pgt[512*2];
++extern pte_t level1_fixmap_pgt[512];
+extern pgd_t init_level4_pgt[512];
#define swapper_pg_dir init_level4_pgt
-@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -61,7 +66,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -18529,7 +18676,7 @@ index 5be9063..d62185b 100644
}
static inline void native_pmd_clear(pmd_t *pmd)
-@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+@@ -97,7 +104,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
@@ -18539,7 +18686,7 @@ index 5be9063..d62185b 100644
}
static inline void native_pud_clear(pud_t *pud)
-@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
+@@ -107,6 +116,13 @@ static inline void native_pud_clear(pud_t *pud)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -22155,7 +22302,7 @@ index f6dfd93..892ade4 100644
.__cr3 = __pa_nodebug(swapper_pg_dir),
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
-index b74ebc7..6dbb0c5 100644
+index b74ebc7..2c95874 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -2,6 +2,9 @@
@@ -22168,7 +22315,15 @@ index b74ebc7..6dbb0c5 100644
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
-@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
+@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
+
+ void printk_address(unsigned long address)
+ {
+- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
++ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
+ }
+
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
@@ -37608,7 +37763,7 @@ index ffb101e..98c0ecf 100644
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index e8a1201..046c66c 100644
+index e8a1201..e1fb520 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
@@ -37620,17 +37775,63 @@ index e8a1201..046c66c 100644
{
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
-@@ -1904,6 +1904,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
+ *
+ * We can construct this by grafting the Xen provided pagetable into
+ * head_64.S's preconstructed pagetables. We copy the Xen L2's into
+- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
+- * means that only the kernel has a physical mapping to start with -
+- * but that's enough to get __va working. We need to fill in the rest
+- * of the physical mapping once some sort of allocator has been set
+- * up.
+- * NOTE: for PVH, the page tables are native.
++ * level2_ident_pgt, and level2_kernel_pgt. This means that only the
++ * kernel has a physical mapping to start with - but that's enough to
++ * get __va working. We need to fill in the rest of the physical
++ * mapping once some sort of allocator has been set up. NOTE: for
++ * PVH, the page tables are native.
+ */
+ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ {
+@@ -1902,8 +1901,14 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ /* L3_i[0] -> level2_ident_pgt */
+ convert_pfn_mfn(level3_ident_pgt);
/* L3_k[510] -> level2_kernel_pgt
- * L3_i[511] -> level2_fixmap_pgt */
+- * L3_i[511] -> level2_fixmap_pgt */
++ * L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
+ convert_pfn_mfn(level3_vmalloc_start_pgt);
+ convert_pfn_mfn(level3_vmalloc_end_pgt);
+ convert_pfn_mfn(level3_vmemmap_pgt);
++
++ /* L3_k[511][506] -> level1_fixmap_pgt */
++ convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
-@@ -1933,8 +1936,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+@@ -1913,30 +1918,29 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ addr[1] = (unsigned long)l3;
+ addr[2] = (unsigned long)l2;
+ /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
+- * Both L4[272][0] and L4[511][511] have entries that point to the same
++ * Both L4[272][0] and L4[511][510] have entries that point to the same
+ * L2 (PMD) tables. Meaning that if you modify it in __va space
+ * it will be also modified in the __ka space! (But if you just
+ * modify the PMD table to point to other PTE's or none, then you
+ * are OK - which is what cleanup_highmap does) */
+ copy_page(level2_ident_pgt, l2);
+- /* Graft it onto L4[511][511] */
++ /* Graft it onto L4[511][510] */
+ copy_page(level2_kernel_pgt, l2);
+
+- /* Get [511][510] and graft that in level2_fixmap_pgt */
+- l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
+- l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
+- copy_page(level2_fixmap_pgt, l2);
+- /* Note that we don't do anything with level1_fixmap_pgt which
+- * we don't need. */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Make pagetable pieces RO */
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
@@ -37642,8 +37843,11 @@ index e8a1201..046c66c 100644
+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
-@@ -2120,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
+ /* Pin down new L4 */
+ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+@@ -2120,6 +2124,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
@@ -37651,7 +37855,7 @@ index e8a1201..046c66c 100644
#endif
/* This will work as long as patching hasn't happened yet
-@@ -2198,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+@@ -2198,6 +2203,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
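Note on the xen/mmu.c hunks above: they correct the grafting so that level2_kernel_pgt is reached through L4[511][510], level2_fixmap_pgt through L4[511][511], and the newly exposed level1_fixmap_pgt is converted and write-protected along with the other head_64.S tables instead of being rebuilt from Xen's copies. A rough standalone illustration of where those indices come from, assuming the conventional 4-level x86-64 layout and __START_KERNEL_map value (assumptions, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t kmap = 0xffffffff80000000ULL;  /* conventional __START_KERNEL_map */

	/* 4-level paging: PGD index = bits 47..39, PUD index = bits 38..30 */
	printf("pgd index: %llu\n", (unsigned long long)((kmap >> 39) & 511)); /* 511 */
	printf("pud index: %llu\n", (unsigned long long)((kmap >> 30) & 511)); /* 510 */
	return 0;
}

The fixmap region lives in the last PUD slot (511) of the same level-3 table, which is why the corrected comment grafts the kernel L2 onto [511][510] and leaves [511][511] for level2_fixmap_pgt.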
@@ -37891,6 +38095,18 @@ index 28d227c..d4c0bad 100644
done:
spin_lock_init(&blkcg->lock);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
+diff --git a/block/blk-exec.c b/block/blk-exec.c
+index f4d27b1..9924725 100644
+--- a/block/blk-exec.c
++++ b/block/blk-exec.c
+@@ -56,6 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ bool is_pm_resume;
+
+ WARN_ON(irqs_disabled());
++ WARN_ON(rq->cmd_type == REQ_TYPE_FS);
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 0736729..2ec3b48 100644
--- a/block/blk-iopoll.c
@@ -37917,6 +38133,53 @@ index f890d43..97b0482 100644
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index ad69ef6..034c0ff 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -219,7 +219,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+ if (tag != BLK_MQ_TAG_FAIL) {
+ rq = data->hctx->tags->rqs[tag];
+
+- rq->cmd_flags = 0;
+ if (blk_mq_tag_busy(data->hctx)) {
+ rq->cmd_flags = REQ_MQ_INFLIGHT;
+ atomic_inc(&data->hctx->nr_active);
+@@ -274,6 +273,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
+
+ if (rq->cmd_flags & REQ_MQ_INFLIGHT)
+ atomic_dec(&hctx->nr_active);
++ rq->cmd_flags = 0;
+
+ clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+ blk_mq_put_tag(hctx, tag, &ctx->last_tag);
+@@ -973,14 +973,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+- !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
+- blk_insert_flush(rq);
+- } else {
+- spin_lock(&ctx->lock);
+- __blk_mq_insert_request(hctx, rq, at_head);
+- spin_unlock(&ctx->lock);
+- }
++ spin_lock(&ctx->lock);
++ __blk_mq_insert_request(hctx, rq, at_head);
++ spin_unlock(&ctx->lock);
+
+ if (run_queue)
+ blk_mq_run_hw_queue(hctx, async);
+@@ -1411,6 +1406,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
+ left -= to_do * rq_size;
+ for (j = 0; j < to_do; j++) {
+ tags->rqs[i] = p;
++ tags->rqs[i]->atomic_flags = 0;
++ tags->rqs[i]->cmd_flags = 0;
+ if (set->ops->init_request) {
+ if (set->ops->init_request(set->driver_data,
+ tags->rqs[i], hctx_idx, i,
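Note on the blk-mq hunks above: cmd_flags (and atomic_flags) are now initialised once when the tag map is built and cleared when a request is freed rather than when it is allocated, presumably so a request reached through the tag map never carries stale flag bits from its previous user; the other hunk drops the REQ_FLUSH/REQ_FUA special case from blk_mq_insert_request(). A simplified sketch of the clean-on-free invariant, with structure and flag names invented:

#include <stdatomic.h>

struct request_sketch {
	unsigned int cmd_flags;
};

#define REQ_INFLIGHT_SKETCH 0x1u

static void alloc_request_sketch(struct request_sketch *rq, atomic_int *nr_active)
{
	/* cmd_flags is already 0 here: it was cleared when the request was freed */
	rq->cmd_flags |= REQ_INFLIGHT_SKETCH;
	atomic_fetch_add(nr_active, 1);
}

static void free_request_sketch(struct request_sketch *rq, atomic_int *nr_active)
{
	if (rq->cmd_flags & REQ_INFLIGHT_SKETCH)
		atomic_fetch_sub(nr_active, 1);
	rq->cmd_flags = 0;      /* clean on free, ready for the next allocation */
}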
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 53b1737..08177d2e 100644
--- a/block/blk-softirq.c
@@ -39868,8 +40131,21 @@ index be73e9d..7fbf140 100644
cmdlist_t *reqQ;
cmdlist_t *cmpQ;
+diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
+index 1aa29f8..080c42f 100644
+--- a/drivers/block/drbd/drbd_bitmap.c
++++ b/drivers/block/drbd/drbd_bitmap.c
+@@ -1042,7 +1042,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
+ submit_bio(rw, bio);
+ /* this should not count as user activity and cause the
+ * resync to throttle -- see drbd_rs_should_slow_down(). */
+- atomic_add(len >> 9, &device->rs_sect_ev);
++ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
+ }
+ }
+
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
-index a76ceb3..3c1a9fd 100644
+index a76ceb3..a4c80cf 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -331,7 +331,7 @@ struct drbd_epoch {
@@ -39890,6 +40166,17 @@ index a76ceb3..3c1a9fd 100644
unsigned int peer_seq;
spinlock_t peer_seq_lock;
unsigned int minor;
+@@ -807,8 +807,8 @@ struct drbd_device {
+ struct mutex own_state_mutex;
+ struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
+ char congestion_reason; /* Why we where congested... */
+- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
+- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
++ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
++ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
+ int rs_last_sect_ev; /* counter to compare with */
+ int rs_last_events; /* counter of read or write "events" (unit sectors)
+ * on the lower level device when we last looked. */
@@ -1407,7 +1407,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
char __user *uoptval;
int err;
@@ -39917,7 +40204,7 @@ index 89c497c..9c736ae 100644
/**
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index 960645c..6c2724a 100644
+index 960645c..61ede05 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1322,7 +1322,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
@@ -39938,6 +40225,17 @@ index 960645c..6c2724a 100644
dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
if (device->state.conn >= C_SYNC_SOURCE &&
device->state.conn <= C_PAUSED_SYNC_T)
+@@ -1905,8 +1905,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
+ atomic_set(&device->unacked_cnt, 0);
+ atomic_set(&device->local_cnt, 0);
+ atomic_set(&device->pp_in_use_by_net, 0);
+- atomic_set(&device->rs_sect_in, 0);
+- atomic_set(&device->rs_sect_ev, 0);
++ atomic_set_unchecked(&device->rs_sect_in, 0);
++ atomic_set_unchecked(&device->rs_sect_ev, 0);
+ atomic_set(&device->ap_in_flight, 0);
+ atomic_set(&device->md_io_in_use, 0);
+
@@ -2670,8 +2670,8 @@ void drbd_destroy_connection(struct kref *kref)
struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
struct drbd_resource *resource = connection->resource;
@@ -39972,7 +40270,7 @@ index 3f2e167..d3170e4 100644
if (!msg)
goto failed;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
-index 5b17ec8..6c21e6b 100644
+index 5b17ec8..deaec7d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
@@ -40025,6 +40323,24 @@ index 5b17ec8..6c21e6b 100644
list_add(&epoch->list, &connection->current_epoch->list);
connection->current_epoch = epoch;
connection->epochs++;
+@@ -1739,7 +1739,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
+ list_add(&peer_req->w.list, &device->sync_ee);
+ spin_unlock_irq(&device->resource->req_lock);
+
+- atomic_add(pi->size >> 9, &device->rs_sect_ev);
++ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
+ if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return 0;
+
+@@ -1837,7 +1837,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
+ drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
+ }
+
+- atomic_add(pi->size >> 9, &device->rs_sect_in);
++ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
+
+ return err;
+ }
@@ -2224,7 +2224,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
err = wait_for_and_update_peer_seq(peer_device, peer_seq);
@@ -40043,6 +40359,33 @@ index 5b17ec8..6c21e6b 100644
atomic_inc(&peer_req->epoch->active);
spin_unlock(&connection->epoch_lock);
+@@ -2406,7 +2406,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
+
+ curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]) -
+- atomic_read(&device->rs_sect_ev);
++ atomic_read_unchecked(&device->rs_sect_ev);
+ if (!device->rs_last_events || curr_events - device->rs_last_events > 64) {
+ unsigned long rs_left;
+ int i;
+@@ -2540,7 +2540,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
+ device->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ } else if (pi->cmd == P_OV_REPLY) {
+ /* track progress, we may need to throttle */
+- atomic_add(size >> 9, &device->rs_sect_in);
++ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
+ peer_req->w.cb = w_e_end_ov_reply;
+ dec_rs_pending(device);
+ /* drbd_rs_begin_io done when we sent this request,
+@@ -2601,7 +2601,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
+ goto out_free_e;
+
+ submit_for_resync:
+- atomic_add(size >> 9, &device->rs_sect_ev);
++ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
+
+ submit:
+ inc_unacked(device);
@@ -4461,7 +4461,7 @@ struct data_cmd {
int expect_payload;
size_t pkt_size;
@@ -40061,6 +40404,15 @@ index 5b17ec8..6c21e6b 100644
connection->send.seen_any_write_yet = false;
drbd_info(connection, "Connection closed\n");
+@@ -5076,7 +5076,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
+ put_ldev(device);
+ }
+ dec_rs_pending(device);
+- atomic_add(blksize >> 9, &device->rs_sect_in);
++ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
+
+ return 0;
+ }
@@ -5364,7 +5364,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
struct asender_cmd {
size_t pkt_size;
@@ -40070,6 +40422,39 @@ index 5b17ec8..6c21e6b 100644
static struct asender_cmd asender_tbl[] = {
[P_PING] = { 0, got_Ping },
+diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
+index d8f57b6..8dbf4b4 100644
+--- a/drivers/block/drbd/drbd_worker.c
++++ b/drivers/block/drbd/drbd_worker.c
+@@ -413,7 +413,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
+ list_add(&peer_req->w.list, &device->read_ee);
+ spin_unlock_irq(&device->resource->req_lock);
+
+- atomic_add(size >> 9, &device->rs_sect_ev);
++ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
+ if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+ return 0;
+
+@@ -558,7 +558,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
+ unsigned int sect_in; /* Number of sectors that came in since the last turn */
+ int number, mxb;
+
+- sect_in = atomic_xchg(&device->rs_sect_in, 0);
++ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
+ device->rs_in_flight -= sect_in;
+
+ rcu_read_lock();
+@@ -1583,8 +1583,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
+ {
+ struct fifo_buffer *plan;
+
+- atomic_set(&device->rs_sect_in, 0);
+- atomic_set(&device->rs_sect_ev, 0);
++ atomic_set_unchecked(&device->rs_sect_in, 0);
++ atomic_set_unchecked(&device->rs_sect_ev, 0);
+ device->rs_in_flight = 0;
+
+ /* Updating the RCU protected object in place is necessary since
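Note on the DRBD hunks above (and the matching 3.2.63 ones further down): this is the usual PaX refcount-hardening conversion. rs_sect_in and rs_sect_ev are resync throughput statistics rather than reference counts, so they move to the atomic_unchecked_t variants that opt out of overflow detection. A simplified userspace-style model of that split; this approximates the convention and is not the PaX implementation:

#include <limits.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_checked_sketch_t;
typedef struct { volatile int counter; } atomic_unchecked_sketch_t;

/* "Checked" add: trap if the addition would overflow (i assumed > 0 here). */
static inline void atomic_add_checked_sketch(int i, atomic_checked_sketch_t *v)
{
	if (v->counter > INT_MAX - i)
		abort();        /* stand-in for the overflow handler */
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

/* "Unchecked" add: a plain wrapping counter, fine for statistics. */
static inline void atomic_add_unchecked_sketch(int i, atomic_unchecked_sketch_t *v)
{
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked_sketch(const atomic_unchecked_sketch_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}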
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6cb1beb..bf490f7 100644
--- a/drivers/block/loop.c
@@ -52835,6 +53220,32 @@ index 1b3a094..068e683 100644
}
}
EXPORT_SYMBOL(fc_exch_update_stats);
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 3d1bc67..874bc95 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ return NULL;
+ }
+
++ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
++ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
++ return NULL;
++ }
++
+ task = conn->login_task;
+ } else {
+ if (session->state != ISCSI_STATE_LOGGED_IN)
+ return NULL;
+
++ if (data_size != 0) {
++ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
++ return NULL;
++ }
++
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 766098a..1c6c971 100644
--- a/drivers/scsi/libsas/sas_ata.c
@@ -53702,6 +54113,19 @@ index ae6f61a..03c3d5d 100644
op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
strlen(filename), mode, LUSTRE_OPC_MKDIR,
lump);
+diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
+index deca27e..22fb433 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
++++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
+@@ -568,7 +568,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
+ if (sb->s_root == NULL) {
+ CERROR("%s: can't make root dentry\n",
+ ll_get_fsname(sb, NULL, 0));
+- GOTO(out_root, err = -ENOMEM);
++ GOTO(out_lock_cn_cb, err = -ENOMEM);
+ }
+
+ sbi->ll_sdev_orig = sb->s_dev;
diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
index f670469..03b7438 100644
--- a/drivers/staging/media/solo6x10/solo6x10-core.c
@@ -53903,7 +54327,7 @@ index d07fcb5..358e1e1 100644
return;
}
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
-index 317c2a8..ffeb4ef 100644
+index 317c2a8..7876515 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
@@ -53932,6 +54356,16 @@ index 317c2a8..ffeb4ef 100644
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
pDevice->apdev->type = ARPHRD_IEEE80211;
+@@ -350,6 +351,9 @@ static int hostap_set_generic_element(PSDevice pDevice,
+ {
+ PSMgmtObject pMgmt = pDevice->pMgmt;
+
++ if (param->u.generic_elem.len > sizeof(pMgmt->abyWPAIE))
++ return -EINVAL;
++
+ memcpy(pMgmt->abyWPAIE,
+ param->u.generic_elem.data,
+ param->u.generic_elem.len
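Note on the vt6655 hostap hunk above: it adds the missing length check before a user-controlled generic information element is copied into the fixed-size abyWPAIE buffer, the standard guard against a heap overflow from an oversized ioctl payload. The same pattern in isolation, with buffer and field names invented for the example:

#include <string.h>
#include <errno.h>

struct mgmt_sketch {
	unsigned char wpa_ie[64];       /* fixed-size destination; size illustrative */
};

/* Validate the caller-supplied length against the destination before
 * copying; reject rather than truncate silently. */
static int set_generic_element_sketch(struct mgmt_sketch *m,
				      const unsigned char *data, size_t len)
{
	if (len > sizeof(m->wpa_ie))
		return -EINVAL;

	memcpy(m->wpa_ie, data, len);
	return 0;
}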
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e7e9372..161f530 100644
--- a/drivers/target/sbp/sbp_target.c
@@ -61057,10 +61491,30 @@ index 7f5b41b..e589c13 100644
static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
diff --git a/fs/buffer.c b/fs/buffer.c
-index eba6e4f..af1182c 100644
+index eba6e4f..8d8230c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -3429,7 +3429,7 @@ void __init buffer_init(void)
+@@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ bh = page_buffers(page);
+ if (bh->b_size == size) {
+ end_block = init_page_buffers(page, bdev,
+- index << sizebits, size);
++ (sector_t)index << sizebits,
++ size);
+ goto done;
+ }
+ if (!try_to_free_buffers(page))
+@@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ */
+ spin_lock(&inode->i_mapping->private_lock);
+ link_dev_buffers(page, bh);
+- end_block = init_page_buffers(page, bdev, index << sizebits, size);
++ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
++ size);
+ spin_unlock(&inode->i_mapping->private_lock);
+ done:
+ ret = (block < end_block) ? 1 : -ENXIO;
+@@ -3429,7 +3431,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -61070,10 +61524,10 @@ index eba6e4f..af1182c 100644
/*
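Note on the grow_dev_page() hunks above: they cast the page index to sector_t before shifting. With a 32-bit page index, index << sizebits is evaluated in 32 bits and silently drops the high bits on large block devices, so init_page_buffers() was handed a wrapped block number; widening first preserves it. A small standalone demonstration of the difference, with types chosen to mimic the 32-bit case:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t index = 0x20000000u;   /* page index at 2 TiB with 4 KiB pages */
	int sizebits = 3;               /* eight 512-byte blocks per page */

	uint64_t truncated = (uint64_t)(index << sizebits); /* shift done in 32 bits: wraps to 0 */
	uint64_t widened   = (uint64_t)index << sizebits;   /* widen first: 0x100000000 */

	printf("truncated=%#llx widened=%#llx\n",
	       (unsigned long long)truncated, (unsigned long long)widened);
	return 0;
}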
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
-index d749731..dd333a6 100644
+index d749731..0fda764 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
-@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
+@@ -39,29 +39,27 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
args);
/* start by checking things over */
@@ -61089,8 +61543,36 @@ index d749731..dd333a6 100644
cache->bcull_percent < cache->brun_percent &&
cache->brun_percent < 100);
+ if (*args) {
+- pr_err("'bind' command doesn't take an argument");
++ pr_err("'bind' command doesn't take an argument\n");
+ return -EINVAL;
+ }
+
+ if (!cache->rootdirname) {
+- pr_err("No cache directory specified");
++ pr_err("No cache directory specified\n");
+ return -EINVAL;
+ }
+
+ /* don't permit already bound caches to be re-bound */
+ if (test_bit(CACHEFILES_READY, &cache->flags)) {
+- pr_err("Cache already bound");
++ pr_err("Cache already bound\n");
+ return -EBUSY;
+ }
+
+@@ -248,7 +246,7 @@ error_open_root:
+ kmem_cache_free(cachefiles_object_jar, fsdef);
+ error_root_object:
+ cachefiles_end_secure(cache, saved_cred);
+- pr_err("Failed to register: %d", ret);
++ pr_err("Failed to register: %d\n", ret);
+ return ret;
+ }
+
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
-index b078d30..db23012 100644
+index b078d30..4a6852c 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
@@ -61111,6 +61593,15 @@ index b078d30..db23012 100644
return -EOPNOTSUPP;
/* drag the command string into the kernel so we can parse it */
+@@ -315,7 +315,7 @@ static unsigned int cachefiles_daemon_poll(struct file *file,
+ static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
+ char *args)
+ {
+- pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%");
++ pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
+
+ return -EINVAL;
+ }
@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
@@ -61129,8 +61620,114 @@ index b078d30..db23012 100644
return cachefiles_daemon_range_error(cache, args);
cache->bstop_percent = bstop;
+@@ -475,12 +475,12 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
+ _enter(",%s", args);
+
+ if (!*args) {
+- pr_err("Empty directory specified");
++ pr_err("Empty directory specified\n");
+ return -EINVAL;
+ }
+
+ if (cache->rootdirname) {
+- pr_err("Second cache directory specified");
++ pr_err("Second cache directory specified\n");
+ return -EEXIST;
+ }
+
+@@ -503,12 +503,12 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
+ _enter(",%s", args);
+
+ if (!*args) {
+- pr_err("Empty security context specified");
++ pr_err("Empty security context specified\n");
+ return -EINVAL;
+ }
+
+ if (cache->secctx) {
+- pr_err("Second security context specified");
++ pr_err("Second security context specified\n");
+ return -EINVAL;
+ }
+
+@@ -531,7 +531,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
+ _enter(",%s", args);
+
+ if (!*args) {
+- pr_err("Empty tag specified");
++ pr_err("Empty tag specified\n");
+ return -EINVAL;
+ }
+
+@@ -562,12 +562,12 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
+ goto inval;
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags)) {
+- pr_err("cull applied to unready cache");
++ pr_err("cull applied to unready cache\n");
+ return -EIO;
+ }
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+- pr_err("cull applied to dead cache");
++ pr_err("cull applied to dead cache\n");
+ return -EIO;
+ }
+
+@@ -587,11 +587,11 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
+
+ notdir:
+ path_put(&path);
+- pr_err("cull command requires dirfd to be a directory");
++ pr_err("cull command requires dirfd to be a directory\n");
+ return -ENOTDIR;
+
+ inval:
+- pr_err("cull command requires dirfd and filename");
++ pr_err("cull command requires dirfd and filename\n");
+ return -EINVAL;
+ }
+
+@@ -614,7 +614,7 @@ static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
+ return 0;
+
+ inval:
+- pr_err("debug command requires mask");
++ pr_err("debug command requires mask\n");
+ return -EINVAL;
+ }
+
+@@ -634,12 +634,12 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
+ goto inval;
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags)) {
+- pr_err("inuse applied to unready cache");
++ pr_err("inuse applied to unready cache\n");
+ return -EIO;
+ }
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+- pr_err("inuse applied to dead cache");
++ pr_err("inuse applied to dead cache\n");
+ return -EIO;
+ }
+
+@@ -659,11 +659,11 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
+
+ notdir:
+ path_put(&path);
+- pr_err("inuse command requires dirfd to be a directory");
++ pr_err("inuse command requires dirfd to be a directory\n");
+ return -ENOTDIR;
+
+ inval:
+- pr_err("inuse command requires dirfd and filename");
++ pr_err("inuse command requires dirfd and filename\n");
+ return -EINVAL;
+ }
+
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
-index 3d50998..0550d67 100644
+index 3d50998..c4e3a69 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -66,7 +66,7 @@ struct cachefiles_cache {
@@ -61167,8 +61764,30 @@ index 3d50998..0550d67 100644
}
#else
+@@ -255,7 +255,7 @@ extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+
+ #define cachefiles_io_error(___cache, FMT, ...) \
+ do { \
+- pr_err("I/O Error: " FMT, ##__VA_ARGS__); \
++ pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
+ fscache_io_error(&(___cache)->cache); \
+ set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
+ } while (0)
+diff --git a/fs/cachefiles/main.c b/fs/cachefiles/main.c
+index 180edfb..711f13d 100644
+--- a/fs/cachefiles/main.c
++++ b/fs/cachefiles/main.c
+@@ -84,7 +84,7 @@ error_proc:
+ error_object_jar:
+ misc_deregister(&cachefiles_dev);
+ error_dev:
+- pr_err("failed to register: %d", ret);
++ pr_err("failed to register: %d\n", ret);
+ return ret;
+ }
+
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
-index 5bf2b41..85b93f9 100644
+index 5bf2b41..81051b4 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -312,7 +312,7 @@ try_again:
@@ -61180,6 +61799,67 @@ index 5bf2b41..85b93f9 100644
/* do the multiway lock magic */
trap = lock_rename(cache->graveyard, dir);
+@@ -543,7 +543,7 @@ lookup_again:
+ next, next->d_inode, next->d_inode->i_ino);
+
+ } else if (!S_ISDIR(next->d_inode->i_mode)) {
+- pr_err("inode %lu is not a directory",
++ pr_err("inode %lu is not a directory\n",
+ next->d_inode->i_ino);
+ ret = -ENOBUFS;
+ goto error;
+@@ -574,7 +574,7 @@ lookup_again:
+ } else if (!S_ISDIR(next->d_inode->i_mode) &&
+ !S_ISREG(next->d_inode->i_mode)
+ ) {
+- pr_err("inode %lu is not a file or directory",
++ pr_err("inode %lu is not a file or directory\n",
+ next->d_inode->i_ino);
+ ret = -ENOBUFS;
+ goto error;
+@@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
+ ASSERT(subdir->d_inode);
+
+ if (!S_ISDIR(subdir->d_inode->i_mode)) {
+- pr_err("%s is not a directory", dirname);
++ pr_err("%s is not a directory\n", dirname);
+ ret = -EIO;
+ goto check_error;
+ }
+@@ -795,13 +795,13 @@ check_error:
+ mkdir_error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(subdir);
+- pr_err("mkdir %s failed with error %d", dirname, ret);
++ pr_err("mkdir %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
+
+ lookup_error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = PTR_ERR(subdir);
+- pr_err("Lookup %s failed with error %d", dirname, ret);
++ pr_err("Lookup %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
+
+ nomem_d_alloc:
+@@ -891,7 +891,7 @@ lookup_error:
+ if (ret == -EIO) {
+ cachefiles_io_error(cache, "Lookup failed");
+ } else if (ret != -ENOMEM) {
+- pr_err("Internal error: %d", ret);
++ pr_err("Internal error: %d\n", ret);
+ ret = -EIO;
+ }
+
+@@ -950,7 +950,7 @@ error:
+ }
+
+ if (ret != -ENOMEM) {
+- pr_err("Internal error: %d", ret);
++ pr_err("Internal error: %d\n", ret);
+ ret = -EIO;
+ }
+
diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
index eccd339..4c1d995 100644
--- a/fs/cachefiles/proc.c
@@ -61223,6 +61903,54 @@ index 4b1fb5c..0d2a699 100644
set_fs(old_fs);
kunmap(page);
file_end_write(file);
+diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
+index 1ad51ffb..acbc1f0 100644
+--- a/fs/cachefiles/xattr.c
++++ b/fs/cachefiles/xattr.c
+@@ -51,7 +51,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
+ }
+
+ if (ret != -EEXIST) {
+- pr_err("Can't set xattr on %*.*s [%lu] (err %d)",
++ pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ -ret);
+@@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
+ if (ret == -ERANGE)
+ goto bad_type_length;
+
+- pr_err("Can't read xattr on %*.*s [%lu] (err %d)",
++ pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ -ret);
+@@ -85,14 +85,14 @@ error:
+ return ret;
+
+ bad_type_length:
+- pr_err("Cache object %lu type xattr length incorrect",
++ pr_err("Cache object %lu type xattr length incorrect\n",
+ dentry->d_inode->i_ino);
+ ret = -EIO;
+ goto error;
+
+ bad_type:
+ xtype[2] = 0;
+- pr_err("Cache object %*.*s [%lu] type %s not %s",
++ pr_err("Cache object %*.*s [%lu] type %s not %s\n",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ xtype, type);
+@@ -293,7 +293,7 @@ error:
+ return ret;
+
+ bad_type_length:
+- pr_err("Cache object %lu xattr length incorrect",
++ pr_err("Cache object %lu xattr length incorrect\n",
+ dentry->d_inode->i_ino);
+ ret = -EIO;
+ goto error;
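Note on the long run of cachefiles hunks above (bind.c, daemon.c, internal.h, main.c, namei.c, xattr.c): they only append a trailing newline to every pr_err() format string, including the cachefiles_io_error() wrapper. Without the '\n', printk of this era keeps the message open as a potential continuation, so the error can be flushed late or glued to unrelated output. The pattern in miniature, with a userspace stand-in for pr_err():

#include <stdio.h>

#define pr_err_sketch(fmt, ...) fprintf(stderr, "cachefiles: " fmt, ##__VA_ARGS__)

int main(void)
{
	pr_err_sketch("Cache already bound");            /* no '\n': the next write runs straight on */
	pr_err_sketch("failed to register: %d\n", -16);  /* properly terminated record */
	return 0;
}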
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index c29d6ae..719b9bb 100644
--- a/fs/ceph/dir.c
@@ -65602,7 +66330,7 @@ index d55297f..f5b28c5 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index 17ca8b8..2de9500 100644
+index 17ca8b8..d023ae5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -65657,7 +66385,42 @@ index 17ca8b8..2de9500 100644
return -EACCES;
}
EXPORT_SYMBOL(generic_permission);
-@@ -825,7 +832,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -644,24 +651,22 @@ static int complete_walk(struct nameidata *nd)
+
+ static __always_inline void set_root(struct nameidata *nd)
+ {
+- if (!nd->root.mnt)
+- get_fs_root(current->fs, &nd->root);
++ get_fs_root(current->fs, &nd->root);
+ }
+
+ static int link_path_walk(const char *, struct nameidata *);
+
+-static __always_inline void set_root_rcu(struct nameidata *nd)
++static __always_inline unsigned set_root_rcu(struct nameidata *nd)
+ {
+- if (!nd->root.mnt) {
+- struct fs_struct *fs = current->fs;
+- unsigned seq;
++ struct fs_struct *fs = current->fs;
++ unsigned seq, res;
+
+- do {
+- seq = read_seqcount_begin(&fs->seq);
+- nd->root = fs->root;
+- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
+- } while (read_seqcount_retry(&fs->seq, seq));
+- }
++ do {
++ seq = read_seqcount_begin(&fs->seq);
++ nd->root = fs->root;
++ res = __read_seqcount_begin(&nd->root.dentry->d_seq);
++ } while (read_seqcount_retry(&fs->seq, seq));
++ return res;
+ }
+
+ static void path_put_conditional(struct path *path, struct nameidata *nd)
+@@ -825,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
{
struct dentry *dentry = link->dentry;
int error;
@@ -65666,7 +66429,7 @@ index 17ca8b8..2de9500 100644
BUG_ON(nd->flags & LOOKUP_RCU);
-@@ -846,6 +853,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -846,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
if (error)
goto out_put_nd_path;
@@ -65679,7 +66442,126 @@ index 17ca8b8..2de9500 100644
nd->last_type = LAST_BIND;
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(*p);
-@@ -1597,6 +1610,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -861,7 +872,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+ return PTR_ERR(s);
+ }
+ if (*s == '/') {
+- set_root(nd);
++ if (!nd->root.mnt)
++ set_root(nd);
+ path_put(&nd->path);
+ nd->path = nd->root;
+ path_get(&nd->root);
+@@ -1092,10 +1104,10 @@ int follow_down_one(struct path *path)
+ }
+ EXPORT_SYMBOL(follow_down_one);
+
+-static inline bool managed_dentry_might_block(struct dentry *dentry)
++static inline int managed_dentry_rcu(struct dentry *dentry)
+ {
+- return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
+- dentry->d_op->d_manage(dentry, true) < 0);
++ return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
++ dentry->d_op->d_manage(dentry, true) : 0;
+ }
+
+ /*
+@@ -1111,11 +1123,18 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+ * Don't forget we might have a non-mountpoint managed dentry
+ * that wants to block transit.
+ */
+- if (unlikely(managed_dentry_might_block(path->dentry)))
++ switch (managed_dentry_rcu(path->dentry)) {
++ case -ECHILD:
++ default:
+ return false;
+-
+- if (!d_mountpoint(path->dentry))
++ case -EISDIR:
+ return true;
++ case 0:
++ break;
++ }
++
++ if (!d_mountpoint(path->dentry))
++ return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
+
+ mounted = __lookup_mnt(path->mnt, path->dentry);
+ if (!mounted)
+@@ -1131,12 +1150,15 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+ */
+ *inode = path->dentry->d_inode;
+ }
+- return read_seqretry(&mount_lock, nd->m_seq);
++ return !read_seqretry(&mount_lock, nd->m_seq) &&
++ !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
+ }
+
+ static int follow_dotdot_rcu(struct nameidata *nd)
+ {
+- set_root_rcu(nd);
++ struct inode *inode = nd->inode;
++ if (!nd->root.mnt)
++ set_root_rcu(nd);
+
+ while (1) {
+ if (nd->path.dentry == nd->root.dentry &&
+@@ -1148,6 +1170,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ struct dentry *parent = old->d_parent;
+ unsigned seq;
+
++ inode = parent->d_inode;
+ seq = read_seqcount_begin(&parent->d_seq);
+ if (read_seqcount_retry(&old->d_seq, nd->seq))
+ goto failed;
+@@ -1157,6 +1180,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ }
+ if (!follow_up_rcu(&nd->path))
+ break;
++ inode = nd->path.dentry->d_inode;
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+ }
+ while (d_mountpoint(nd->path.dentry)) {
+@@ -1166,11 +1190,12 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ break;
+ nd->path.mnt = &mounted->mnt;
+ nd->path.dentry = mounted->mnt.mnt_root;
++ inode = nd->path.dentry->d_inode;
+ nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+- if (!read_seqretry(&mount_lock, nd->m_seq))
++ if (read_seqretry(&mount_lock, nd->m_seq))
+ goto failed;
+ }
+- nd->inode = nd->path.dentry->d_inode;
++ nd->inode = inode;
+ return 0;
+
+ failed:
+@@ -1249,7 +1274,8 @@ static void follow_mount(struct path *path)
+
+ static void follow_dotdot(struct nameidata *nd)
+ {
+- set_root(nd);
++ if (!nd->root.mnt)
++ set_root(nd);
+
+ while(1) {
+ struct dentry *old = nd->path.dentry;
+@@ -1403,11 +1429,8 @@ static int lookup_fast(struct nameidata *nd,
+ }
+ path->mnt = mnt;
+ path->dentry = dentry;
+- if (unlikely(!__follow_mount_rcu(nd, path, inode)))
+- goto unlazy;
+- if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+- goto unlazy;
+- return 0;
++ if (likely(__follow_mount_rcu(nd, path, inode)))
++ return 0;
+ unlazy:
+ if (unlazy_walk(nd, dentry))
+ return -ECHILD;
+@@ -1597,6 +1620,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (res)
break;
res = walk_component(nd, path, LOOKUP_FOLLOW);
@@ -65688,7 +66570,7 @@ index 17ca8b8..2de9500 100644
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1669,7 +1684,7 @@ EXPORT_SYMBOL(full_name_hash);
+@@ -1669,7 +1694,7 @@ EXPORT_SYMBOL(full_name_hash);
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
unsigned long a, b, adata, bdata, mask, hash, len;
@@ -65697,7 +66579,32 @@ index 17ca8b8..2de9500 100644
hash = a = 0;
len = -sizeof(unsigned long);
-@@ -1953,6 +1968,8 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1847,7 +1872,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+ if (*name=='/') {
+ if (flags & LOOKUP_RCU) {
+ rcu_read_lock();
+- set_root_rcu(nd);
++ nd->seq = set_root_rcu(nd);
+ } else {
+ set_root(nd);
+ path_get(&nd->root);
+@@ -1898,7 +1923,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+ }
+
+ nd->inode = nd->path.dentry->d_inode;
+- return 0;
++ if (!(flags & LOOKUP_RCU))
++ return 0;
++ if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
++ return 0;
++ if (!(nd->flags & LOOKUP_ROOT))
++ nd->root.mnt = NULL;
++ rcu_read_unlock();
++ return -ECHILD;
+ }
+
+ static inline int lookup_last(struct nameidata *nd, struct path *path)
+@@ -1953,6 +1985,8 @@ static int path_lookupat(int dfd, const char *name,
if (err)
break;
err = lookup_last(nd, &path);
@@ -65706,7 +66613,7 @@ index 17ca8b8..2de9500 100644
put_link(nd, &link, cookie);
}
}
-@@ -1960,6 +1977,13 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1960,6 +1994,13 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -65720,7 +66627,7 @@ index 17ca8b8..2de9500 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!d_can_lookup(nd->path.dentry)) {
path_put(&nd->path);
-@@ -1987,8 +2011,15 @@ static int filename_lookup(int dfd, struct filename *name,
+@@ -1987,8 +2028,15 @@ static int filename_lookup(int dfd, struct filename *name,
retval = path_lookupat(dfd, name->name,
flags | LOOKUP_REVAL, nd);
@@ -65737,7 +66644,7 @@ index 17ca8b8..2de9500 100644
return retval;
}
-@@ -2570,6 +2601,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2570,6 +2618,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -65751,7 +66658,7 @@ index 17ca8b8..2de9500 100644
return 0;
}
-@@ -2801,7 +2839,7 @@ looked_up:
+@@ -2801,7 +2856,7 @@ looked_up:
* cleared otherwise prior to returning.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
@@ -65760,7 +66667,7 @@ index 17ca8b8..2de9500 100644
const struct open_flags *op,
bool got_write, int *opened)
{
-@@ -2836,6 +2874,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2836,6 +2891,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
@@ -65778,7 +66685,7 @@ index 17ca8b8..2de9500 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2857,6 +2906,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2857,6 +2923,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -65787,7 +66694,7 @@ index 17ca8b8..2de9500 100644
}
out_no_open:
path->dentry = dentry;
-@@ -2871,7 +2922,7 @@ out_dput:
+@@ -2871,7 +2939,7 @@ out_dput:
/*
* Handle the last step of open()
*/
@@ -65796,7 +66703,7 @@ index 17ca8b8..2de9500 100644
struct file *file, const struct open_flags *op,
int *opened, struct filename *name)
{
-@@ -2921,6 +2972,15 @@ static int do_last(struct nameidata *nd, struct path *path,
+@@ -2921,6 +2989,15 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
@@ -65812,7 +66719,7 @@ index 17ca8b8..2de9500 100644
audit_inode(name, dir, LOOKUP_PARENT);
error = -EISDIR;
/* trailing slashes? */
-@@ -2940,7 +3000,7 @@ retry_lookup:
+@@ -2940,7 +3017,7 @@ retry_lookup:
*/
}
mutex_lock(&dir->d_inode->i_mutex);
@@ -65821,7 +66728,7 @@ index 17ca8b8..2de9500 100644
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
-@@ -2964,11 +3024,28 @@ retry_lookup:
+@@ -2964,11 +3041,28 @@ retry_lookup:
goto finish_open_created;
}
@@ -65851,7 +66758,7 @@ index 17ca8b8..2de9500 100644
/*
* If atomic_open() acquired write access it is dropped now due to
-@@ -3009,6 +3086,11 @@ finish_lookup:
+@@ -3009,6 +3103,11 @@ finish_lookup:
}
}
BUG_ON(inode != path->dentry->d_inode);
@@ -65863,7 +66770,7 @@ index 17ca8b8..2de9500 100644
return 1;
}
-@@ -3018,7 +3100,6 @@ finish_lookup:
+@@ -3018,7 +3117,6 @@ finish_lookup:
save_parent.dentry = nd->path.dentry;
save_parent.mnt = mntget(path->mnt);
nd->path.dentry = path->dentry;
@@ -65871,7 +66778,7 @@ index 17ca8b8..2de9500 100644
}
nd->inode = inode;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
-@@ -3028,7 +3109,18 @@ finish_open:
+@@ -3028,7 +3126,18 @@ finish_open:
path_put(&save_parent);
return error;
}
@@ -65890,7 +66797,7 @@ index 17ca8b8..2de9500 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3191,7 +3283,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3191,7 +3300,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(error))
goto out;
@@ -65899,7 +66806,7 @@ index 17ca8b8..2de9500 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3209,7 +3301,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3209,7 +3318,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -65908,7 +66815,7 @@ index 17ca8b8..2de9500 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3309,9 +3401,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+@@ -3309,9 +3418,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
goto unlock;
error = -EEXIST;
@@ -65922,7 +66829,7 @@ index 17ca8b8..2de9500 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3363,6 +3457,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3363,6 +3474,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -65943,7 +66850,7 @@ index 17ca8b8..2de9500 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3426,6 +3534,17 @@ retry:
+@@ -3426,6 +3551,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -65961,16 +66868,16 @@ index 17ca8b8..2de9500 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3442,6 +3561,8 @@ retry:
+@@ -3441,6 +3577,8 @@ retry:
+ error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
- out:
+ if (!error)
+ gr_handle_create(dentry, path.mnt);
+ out:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
-@@ -3495,9 +3616,16 @@ retry:
+@@ -3495,9 +3633,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -65987,7 +66894,7 @@ index 17ca8b8..2de9500 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3580,6 +3708,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3580,6 +3725,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -65996,7 +66903,7 @@ index 17ca8b8..2de9500 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3612,10 +3742,21 @@ retry:
+@@ -3612,10 +3759,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -66018,7 +66925,7 @@ index 17ca8b8..2de9500 100644
exit3:
dput(dentry);
exit2:
-@@ -3706,6 +3847,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3706,6 +3864,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -66027,7 +66934,7 @@ index 17ca8b8..2de9500 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3732,10 +3875,22 @@ retry_deleg:
+@@ -3732,10 +3892,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -66050,7 +66957,7 @@ index 17ca8b8..2de9500 100644
exit2:
dput(dentry);
}
-@@ -3824,9 +3979,17 @@ retry:
+@@ -3824,9 +3996,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -66068,7 +66975,7 @@ index 17ca8b8..2de9500 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3930,6 +4093,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3930,6 +4110,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -66076,7 +66983,7 @@ index 17ca8b8..2de9500 100644
int how = 0;
int error;
-@@ -3953,7 +4117,7 @@ retry:
+@@ -3953,7 +4134,7 @@ retry:
if (error)
return error;
@@ -66085,7 +66992,7 @@ index 17ca8b8..2de9500 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -3965,11 +4129,28 @@ retry:
+@@ -3965,11 +4146,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -66114,7 +67021,7 @@ index 17ca8b8..2de9500 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4279,6 +4460,12 @@ retry_deleg:
+@@ -4279,6 +4477,12 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -66127,7 +67034,7 @@ index 17ca8b8..2de9500 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry, flags);
if (error)
-@@ -4286,6 +4473,9 @@ retry_deleg:
+@@ -4286,6 +4490,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode, flags);
@@ -66137,7 +67044,7 @@ index 17ca8b8..2de9500 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4328,14 +4518,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -4328,14 +4535,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
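Note on the fs/namei.c hunks above: alongside the grsecurity access-control hooks they carry a series of RCU path-walk fixes: set_root_rcu() returns the sequence number it sampled and path_init() re-checks it before committing to RCU mode, follow_dotdot_rcu() tracks the inode of the directory it actually crossed into, and the automount test is folded into __follow_mount_rcu(). The building block for all of these is the seqcount retry loop; a self-contained analogue of that pattern with simplified names (not the kernel's seqlock code):

#include <stdatomic.h>

struct seqcount_sketch { atomic_uint seq; };        /* odd while a writer is active */
struct path_sketch { void *mnt, *dentry; };

static unsigned read_begin_sketch(const struct seqcount_sketch *s)
{
	unsigned v;
	while ((v = atomic_load_explicit(&s->seq, memory_order_acquire)) & 1)
		;                                       /* writer in progress, wait */
	return v;
}

static int read_retry_sketch(const struct seqcount_sketch *s, unsigned start)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&s->seq, memory_order_relaxed) != start;
}

/* The shape of set_root_rcu(): sample fs->root consistently and hand the
 * sequence number back so the caller can revalidate it later. */
static unsigned sample_root_sketch(const struct seqcount_sketch *s,
				   const struct path_sketch *root,
				   struct path_sketch *out)
{
	unsigned seq;
	do {
		seq = read_begin_sketch(s);
		*out = *root;                           /* may be torn; the retry catches it */
	} while (read_retry_sketch(s, seq));
	return seq;
}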
@@ -99947,6 +100854,19 @@ index 1706cbb..f89dbca 100644
if (err) {
bdi_destroy(bdi);
return err;
+diff --git a/mm/dmapool.c b/mm/dmapool.c
+index 306baa5..ba8019b 100644
+--- a/mm/dmapool.c
++++ b/mm/dmapool.c
+@@ -176,7 +176,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+ if (list_empty(&dev->dma_pools) &&
+ device_create_file(dev, &dev_attr_pools)) {
+ kfree(retval);
+- return NULL;
++ retval = NULL;
+ } else
+ list_add(&retval->pools, &dev->dma_pools);
+ mutex_unlock(&pools_lock);
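Note on the dmapool hunk above: it fixes an error path that returned NULL while pools_lock was still held when device_create_file() failed; storing NULL in retval and falling through keeps the single unlock site. The shape of the fix, reduced to its essentials with invented names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pools_lock_sketch = PTHREAD_MUTEX_INITIALIZER;

struct pool_sketch { int unused; };

/* One lock/unlock pair: error paths set the result and fall through
 * instead of returning with the mutex held. */
static struct pool_sketch *create_pool_sketch(int register_fails)
{
	struct pool_sketch *p = malloc(sizeof(*p));

	pthread_mutex_lock(&pools_lock_sketch);
	if (register_fails) {
		free(p);
		p = NULL;               /* was: return NULL, which leaked the lock */
	} else {
		/* ... link p into the device's pool list ... */
	}
	pthread_mutex_unlock(&pools_lock_sketch);
	return p;
}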
diff --git a/mm/filemap.c b/mm/filemap.c
index 8163e04..191cb97 100644
--- a/mm/filemap.c
@@ -103283,7 +104203,7 @@ index 22a4a76..9551288 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index af68b15..1227320 100644
+index af68b15..f7f853d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -33,7 +33,7 @@
@@ -103304,7 +104224,19 @@ index af68b15..1227320 100644
/*
* shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -2219,6 +2219,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2064,8 +2064,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
+
+ if (new_dentry->d_inode) {
+ (void) shmem_unlink(new_dir, new_dentry);
+- if (they_are_dirs)
++ if (they_are_dirs) {
++ drop_nlink(new_dentry->d_inode);
+ drop_nlink(old_dir);
++ }
+ } else if (they_are_dirs) {
+ drop_nlink(old_dir);
+ inc_nlink(new_dir);
+@@ -2219,6 +2221,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -103316,7 +104248,7 @@ index af68b15..1227320 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2274,6 +2279,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2274,6 +2281,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -103332,7 +104264,7 @@ index af68b15..1227320 100644
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
-@@ -2586,8 +2600,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2586,8 +2602,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -103343,7 +104275,7 @@ index af68b15..1227320 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 3070b92..bcfff83 100644
+index 3070b92..6596d86 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -311,10 +311,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -103396,7 +104328,32 @@ index 3070b92..bcfff83 100644
slab_early_init = 0;
-@@ -3512,6 +3516,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -2224,7 +2228,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
+ int
+ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ {
+- size_t left_over, freelist_size, ralign;
++ size_t left_over, freelist_size;
++ size_t ralign = BYTES_PER_WORD;
+ gfp_t gfp;
+ int err;
+ size_t size = cachep->size;
+@@ -2257,14 +2262,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ size &= ~(BYTES_PER_WORD - 1);
+ }
+
+- /*
+- * Redzoning and user store require word alignment or possibly larger.
+- * Note this will be overridden by architecture or caller mandated
+- * alignment if either is greater than BYTES_PER_WORD.
+- */
+- if (flags & SLAB_STORE_USER)
+- ralign = BYTES_PER_WORD;
+-
+ if (flags & SLAB_RED_ZONE) {
+ ralign = REDZONE_ALIGN;
+ /* If redzoning, ensure that the second redzone is suitably
+@@ -3512,6 +3509,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
@@ -103418,7 +104375,7 @@ index 3070b92..bcfff83 100644
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
-@@ -3735,6 +3754,7 @@ void kfree(const void *objp)
+@@ -3735,6 +3747,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -103426,7 +104383,7 @@ index 3070b92..bcfff83 100644
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
-@@ -4176,14 +4196,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+@@ -4176,14 +4189,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
}
/* cpu stats */
{
@@ -103453,7 +104410,7 @@ index 3070b92..bcfff83 100644
#endif
}
-@@ -4404,13 +4432,69 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4404,13 +4425,69 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -113900,11 +114857,11 @@ index b304068..462d24e 100644
fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
new file mode 100644
-index 0000000..3fd3699
+index 0000000..42018ed
--- /dev/null
+++ b/scripts/gcc-plugin.sh
-@@ -0,0 +1,43 @@
-+#!/bin/bash
+@@ -0,0 +1,51 @@
++#!/bin/sh
+srctree=$(dirname "$0")
+gccplugins_dir=$($3 -print-file-name=plugin)
+plugincc=$($1 -E - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
@@ -113922,15 +114879,23 @@ index 0000000..3fd3699
+ exit 1
+fi
+
-+if [[ "$plugincc" =~ "$1 CC" ]]
-+then
-+ echo "$1"
-+ exit 0
-+fi
++case "$plugincc" in
++ *"$1 CC"*)
++ echo "$1"
++ exit 0
++ ;;
+
-+if [[ "$plugincc" =~ "$2 CXX" ]]
-+then
-+plugincc=$($1 -c -x c++ -std=gnu++98 - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
++ *"$2 CXX"*)
++ # the c++ compiler needs another test, see below
++ ;;
++
++ *)
++ exit 1
++ ;;
++esac
++
++# we need a c++ compiler that supports the designated initializer GNU extension
++plugincc=$($2 -c -x c++ -std=gnu++98 - -fsyntax-only -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
+#include "gcc-common.h"
+class test {
+public:
@@ -113940,12 +114905,12 @@ index 0000000..3fd3699
+};
+EOF
+)
++
+if [ $? -eq 0 ]
+then
+ echo "$2"
+ exit 0
+fi
-+fi
+exit 1
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 5de5660..d3deb89 100644
@@ -114229,6 +115194,19 @@ index 8fac3fd..32ff38d 100644
unsigned int num_sections;
unsigned int secindex_strings;
+diff --git a/scripts/tags.sh b/scripts/tags.sh
+index e6b011f..2d5f70f 100755
+--- a/scripts/tags.sh
++++ b/scripts/tags.sh
+@@ -26,7 +26,7 @@ else
+ fi
+
+ # ignore userspace tools
+-ignore="$ignore ( -path ${tree}tools ) -prune -o"
++ignore="$ignore ( -path \"${tree}tools/[^g]*\" ) -prune -o"
+
+ # Find all available archs
+ find_all_archs()
diff --git a/security/Kconfig b/security/Kconfig
index beb86b5..9becb4a 100644
--- a/security/Kconfig
diff --git a/3.2.63/0000_README b/3.2.63/0000_README
index 251218e..90399c4 100644
--- a/3.2.63/0000_README
+++ b/3.2.63/0000_README
@@ -170,7 +170,7 @@ Patch: 1062_linux-3.2.63.patch
From: http://www.kernel.org
Desc: Linux 3.2.63
-Patch: 4420_grsecurity-3.0-3.2.63-201409180857.patch
+Patch: 4420_grsecurity-3.0-3.2.63-201409282020.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.63/4420_grsecurity-3.0-3.2.63-201409180857.patch b/3.2.63/4420_grsecurity-3.0-3.2.63-201409282020.patch
index 3cee459..e277955 100644
--- a/3.2.63/4420_grsecurity-3.0-3.2.63-201409180857.patch
+++ b/3.2.63/4420_grsecurity-3.0-3.2.63-201409282020.patch
@@ -6205,20 +6205,43 @@ index 5efe8c9..db9ceef 100644
return 0;
}
diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
-index 5a783d8..fbe4c8b 100644
+index 5a783d8..522eb00 100644
--- a/arch/powerpc/mm/mmap_64.c
+++ b/arch/powerpc/mm/mmap_64.c
-@@ -65,6 +65,10 @@ static unsigned long mmap_rnd(void)
+@@ -61,10 +61,14 @@ static inline int mmap_is_legacy(void)
+ *
+ * To avoid this we can shift the randomness by 1 bit.
+ */
+-static unsigned long mmap_rnd(void)
++static unsigned long mmap_rnd(struct mm_struct *mm)
{
unsigned long rnd = 0;
+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
if (current->flags & PF_RANDOMIZE) {
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
+@@ -75,7 +79,7 @@ static unsigned long mmap_rnd(void)
+ return (rnd << PAGE_SHIFT) * 2;
+ }
+
+-static inline unsigned long mmap_base(void)
++static inline unsigned long mmap_base(struct mm_struct *mm)
+ {
+ unsigned long gap = rlimit(RLIMIT_STACK);
+
+@@ -84,7 +88,7 @@ static inline unsigned long mmap_base(void)
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
++ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
+ }
+
+ /*
@@ -99,10 +103,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
*/
if (mmap_is_legacy()) {
@@ -6232,7 +6255,8 @@ index 5a783d8..fbe4c8b 100644
mm->get_unmapped_area = arch_get_unmapped_area;
mm->unmap_area = arch_unmap_area;
} else {
- mm->mmap_base = mmap_base();
+- mm->mmap_base = mmap_base();
++ mm->mmap_base = mmap_base(mm);
+
+#ifdef CONFIG_PAX_RANDMMAP
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
@@ -16877,7 +16901,7 @@ index 103b6ab..2004d0a 100644
#endif
initial_code = (unsigned long)wakeup_long64;
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
-index 13ab720..95d5442 100644
+index 13ab7205..95d5442 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -30,13 +30,11 @@ wakeup_pmode_return:
@@ -34144,8 +34168,21 @@ index be73e9d..7fbf140 100644
cmdlist_t *reqQ;
cmdlist_t *cmpQ;
+diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
+index 912f585..610551e 100644
+--- a/drivers/block/drbd/drbd_bitmap.c
++++ b/drivers/block/drbd/drbd_bitmap.c
+@@ -992,7 +992,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
+ submit_bio(rw, bio);
+ /* this should not count as user activity and cause the
+ * resync to throttle -- see drbd_rs_should_slow_down(). */
+- atomic_add(len >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(len >> 9, &mdev->rs_sect_ev);
+ }
+ }
+
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
-index 9cf2035..bffca95 100644
+index 9cf2035..c8cbfe1 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -736,7 +736,7 @@ struct drbd_request;
@@ -34166,6 +34203,17 @@ index 9cf2035..bffca95 100644
unsigned int peer_seq;
spinlock_t peer_seq_lock;
unsigned int minor;
+@@ -1118,8 +1118,8 @@ struct drbd_conf {
+ u64 ed_uuid; /* UUID of the exposed data */
+ struct mutex state_mutex;
+ char congestion_reason; /* Why we where congested... */
+- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
+- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
++ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
++ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
+ int rs_last_sect_ev; /* counter to compare with */
+ int rs_last_events; /* counter of read or write "events" (unit sectors)
+ * on the lower level device when we last looked. */
@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
static inline void drbd_tcp_cork(struct socket *sock)
@@ -34206,7 +34254,7 @@ index 9cf2035..bffca95 100644
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index 0358e55..bc33689 100644
+index 0358e55..4e25ed1 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
@@ -34227,7 +34275,7 @@ index 0358e55..bc33689 100644
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
-@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
+@@ -2981,11 +2981,11 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->unacked_cnt, 0);
atomic_set(&mdev->local_cnt, 0);
atomic_set(&mdev->net_cnt, 0);
@@ -34235,7 +34283,13 @@ index 0358e55..bc33689 100644
+ atomic_set_unchecked(&mdev->packet_seq, 0);
atomic_set(&mdev->pp_in_use, 0);
atomic_set(&mdev->pp_in_use_by_net, 0);
- atomic_set(&mdev->rs_sect_in, 0);
+- atomic_set(&mdev->rs_sect_in, 0);
+- atomic_set(&mdev->rs_sect_ev, 0);
++ atomic_set_unchecked(&mdev->rs_sect_in, 0);
++ atomic_set_unchecked(&mdev->rs_sect_ev, 0);
+ atomic_set(&mdev->ap_in_flight, 0);
+
+ mutex_init(&mdev->md_io_mutex);
@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
mdev->receiver.t_state);
@@ -34306,7 +34360,7 @@ index af2a250..0fdeb75 100644
cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
(int)((char *)tl - (char *)reply->tag_list);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
-index 13cbdd3..d374957 100644
+index 13cbdd3..9c663ab 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -894,7 +894,7 @@ retry:
@@ -34367,6 +34421,24 @@ index 13cbdd3..d374957 100644
list_add(&epoch->list, &mdev->current_epoch->list);
mdev->current_epoch = epoch;
mdev->epochs++;
+@@ -1449,7 +1449,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
+ list_add(&e->w.list, &mdev->sync_ee);
+ spin_unlock_irq(&mdev->req_lock);
+
+- atomic_add(data_size >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(data_size >> 9, &mdev->rs_sect_ev);
+ if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
+ return true;
+
+@@ -1519,7 +1519,7 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
+ drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
+ }
+
+- atomic_add(data_size >> 9, &mdev->rs_sect_in);
++ atomic_add_unchecked(data_size >> 9, &mdev->rs_sect_in);
+
+ return ok;
+ }
@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
spin_unlock(&mdev->peer_seq_lock);
@@ -34385,6 +34457,33 @@ index 13cbdd3..d374957 100644
atomic_inc(&e->epoch->active);
spin_unlock(&mdev->epoch_lock);
+@@ -1906,7 +1906,7 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
+
+ curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
+ (int)part_stat_read(&disk->part0, sectors[1]) -
+- atomic_read(&mdev->rs_sect_ev);
++ atomic_read_unchecked(&mdev->rs_sect_ev);
+
+ if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
+ unsigned long rs_left;
+@@ -2034,7 +2034,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
+ mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
+ } else if (cmd == P_OV_REPLY) {
+ /* track progress, we may need to throttle */
+- atomic_add(size >> 9, &mdev->rs_sect_in);
++ atomic_add_unchecked(size >> 9, &mdev->rs_sect_in);
+ e->w.cb = w_e_end_ov_reply;
+ dec_rs_pending(mdev);
+ /* drbd_rs_begin_io done when we sent this request,
+@@ -2098,7 +2098,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
+ goto out_free_e;
+
+ submit_for_resync:
+- atomic_add(size >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(size >> 9, &mdev->rs_sect_ev);
+
+ submit:
+ inc_unacked(mdev);
@@ -3637,7 +3637,7 @@ struct data_cmd {
int expect_payload;
size_t pkt_size;
@@ -34403,6 +34502,15 @@ index 13cbdd3..d374957 100644
D_ASSERT(list_empty(&mdev->current_epoch->list));
}
+@@ -4240,7 +4240,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
+ put_ldev(mdev);
+ }
+ dec_rs_pending(mdev);
+- atomic_add(blksize >> 9, &mdev->rs_sect_in);
++ atomic_add_unchecked(blksize >> 9, &mdev->rs_sect_in);
+
+ return true;
+ }
@@ -4492,7 +4492,7 @@ static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
struct asender_cmd {
size_t pkt_size;
@@ -34412,6 +34520,39 @@ index 13cbdd3..d374957 100644
static struct asender_cmd *get_asender_cmd(int cmd)
{
+diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
+index 4d3e6f6..5555fc4 100644
+--- a/drivers/block/drbd/drbd_worker.c
++++ b/drivers/block/drbd/drbd_worker.c
+@@ -368,7 +368,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
+ list_add(&e->w.list, &mdev->read_ee);
+ spin_unlock_irq(&mdev->req_lock);
+
+- atomic_add(size >> 9, &mdev->rs_sect_ev);
++ atomic_add_unchecked(size >> 9, &mdev->rs_sect_ev);
+ if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
+ return 0;
+
+@@ -448,7 +448,7 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
+ int curr_corr;
+ int max_sect;
+
+- sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
++ sect_in = atomic_xchg_unchecked(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
+ mdev->rs_in_flight -= sect_in;
+
+ spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
+@@ -1455,8 +1455,8 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
+
+ void drbd_rs_controller_reset(struct drbd_conf *mdev)
+ {
+- atomic_set(&mdev->rs_sect_in, 0);
+- atomic_set(&mdev->rs_sect_ev, 0);
++ atomic_set_unchecked(&mdev->rs_sect_in, 0);
++ atomic_set_unchecked(&mdev->rs_sect_ev, 0);
+ mdev->rs_in_flight = 0;
+ mdev->rs_planed = 0;
+ spin_lock(&mdev->peer_seq_lock);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d659135..45fe633 100644
--- a/drivers/block/loop.c
@@ -47584,6 +47725,32 @@ index 9de9db2..1e09660 100644
fc_frame_free(fp);
}
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 143bbe4..2794a30 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -718,11 +718,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ return NULL;
+ }
+
++ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
++ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
++ return NULL;
++ }
++
+ task = conn->login_task;
+ } else {
+ if (session->state != ISCSI_STATE_LOGGED_IN)
+ return NULL;
+
++ if (data_size != 0) {
++ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
++ return NULL;
++ }
++
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 5e170e3..1e87efc 100644
--- a/drivers/scsi/libsas/sas_ata.c
@@ -56287,10 +56454,30 @@ index 200f63b..490b833 100644
/*
* used by btrfsctl to scan devices when no FS is mounted
diff --git a/fs/buffer.c b/fs/buffer.c
-index 5f4bde2..9dffef0 100644
+index 5f4bde2..b4d23b3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -3316,7 +3316,7 @@ void __init buffer_init(void)
+@@ -1021,7 +1021,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ bh = page_buffers(page);
+ if (bh->b_size == size) {
+ end_block = init_page_buffers(page, bdev,
+- index << sizebits, size);
++ (sector_t)index << sizebits,
++ size);
+ goto done;
+ }
+ if (!try_to_free_buffers(page))
+@@ -1042,7 +1043,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ */
+ spin_lock(&inode->i_mapping->private_lock);
+ link_dev_buffers(page, bh);
+- end_block = init_page_buffers(page, bdev, index << sizebits, size);
++ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
++ size);
+ spin_unlock(&inode->i_mapping->private_lock);
+ done:
+ ret = (block < end_block) ? 1 : -ENXIO;
+@@ -3316,7 +3318,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -98170,7 +98357,7 @@ index f3f6fd3..0d91a63 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index 1371021..c2094c7 100644
+index 1371021..7104960 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -31,7 +31,7 @@
@@ -98191,7 +98378,19 @@ index 1371021..c2094c7 100644
/*
* vmtruncate_range() communicates with shmem_fault via
-@@ -1924,6 +1924,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -1719,8 +1719,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
+
+ if (new_dentry->d_inode) {
+ (void) shmem_unlink(new_dir, new_dentry);
+- if (they_are_dirs)
++ if (they_are_dirs) {
++ drop_nlink(new_dentry->d_inode);
+ drop_nlink(old_dir);
++ }
+ } else if (they_are_dirs) {
+ drop_nlink(old_dir);
+ inc_nlink(new_dir);
+@@ -1924,6 +1926,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -98203,7 +98402,7 @@ index 1371021..c2094c7 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -1977,6 +1982,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -1977,6 +1984,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -98219,7 +98418,7 @@ index 1371021..c2094c7 100644
if (size == 0)
value = ""; /* empty EA, do not remove */
-@@ -2310,8 +2324,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2310,8 +2326,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -108226,11 +108425,11 @@ index cb1f50c..cef2a7c 100644
fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
new file mode 100644
-index 0000000..3fd3699
+index 0000000..42018ed
--- /dev/null
+++ b/scripts/gcc-plugin.sh
-@@ -0,0 +1,43 @@
-+#!/bin/bash
+@@ -0,0 +1,51 @@
++#!/bin/sh
+srctree=$(dirname "$0")
+gccplugins_dir=$($3 -print-file-name=plugin)
+plugincc=$($1 -E - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
@@ -108248,15 +108447,23 @@ index 0000000..3fd3699
+ exit 1
+fi
+
-+if [[ "$plugincc" =~ "$1 CC" ]]
-+then
-+ echo "$1"
-+ exit 0
-+fi
++case "$plugincc" in
++ *"$1 CC"*)
++ echo "$1"
++ exit 0
++ ;;
+
-+if [[ "$plugincc" =~ "$2 CXX" ]]
-+then
-+plugincc=$($1 -c -x c++ -std=gnu++98 - -o /dev/null -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
++ *"$2 CXX"*)
++ # the c++ compiler needs another test, see below
++ ;;
++
++ *)
++ exit 1
++ ;;
++esac
++
++# we need a c++ compiler that supports the designated initializer GNU extension
++plugincc=$($2 -c -x c++ -std=gnu++98 - -fsyntax-only -I${srctree}/../tools/gcc -I${gccplugins_dir}/include 2>&1 <<EOF
+#include "gcc-common.h"
+class test {
+public:
@@ -108266,12 +108473,12 @@ index 0000000..3fd3699
+};
+EOF
+)
++
+if [ $? -eq 0 ]
+then
+ echo "$2"
+ exit 0
+fi
-+fi
+exit 1
diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
index 48462be..3e08f94 100644