summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnthony G. Basile <blueness@gentoo.org>2015-07-26 10:58:55 -0400
committerAnthony G. Basile <blueness@gentoo.org>2015-07-26 19:43:08 -0400
commitc87afd661b02b834186274edcada73b95b8425c2 (patch)
treedb21faea01247167672967e4e210a0f7ede984ce
parentGrsec/PaX: 3.1-{3.2.69,3.14.48,4.0.8}-201507111211 (diff)
downloadhardened-patchset-c87afd661b02b834186274edcada73b95b8425c2.tar.gz
hardened-patchset-c87afd661b02b834186274edcada73b95b8425c2.tar.bz2
hardened-patchset-c87afd661b02b834186274edcada73b95b8425c2.zip
grsecurity-{3.2.69,3.14.48,4.1.3}-201507251419
-rw-r--r--3.14.48/0000_README6
-rw-r--r--3.14.48/1046_linux-3.14.47.patch1395
-rw-r--r--3.14.48/4420_grsecurity-3.1-3.14.48-201507251417.patch (renamed from 3.14.48/4420_grsecurity-3.1-3.14.48-201507111210.patch)2066
-rw-r--r--3.2.69/0000_README2
-rw-r--r--3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch (renamed from 3.2.69/4420_grsecurity-3.1-3.2.69-201507111207.patch)4612
-rw-r--r--4.0.8/1007_linux-4.0.8.patch2139
-rw-r--r--4.1.3/0000_README (renamed from 4.0.8/0000_README)6
-rw-r--r--4.1.3/4420_grsecurity-3.1-4.1.3-201507251419.patch (renamed from 4.0.8/4420_grsecurity-3.1-4.0.8-201507111211.patch)13925
-rw-r--r--4.1.3/4425_grsec_remove_EI_PAX.patch (renamed from 4.0.8/4425_grsec_remove_EI_PAX.patch)0
-rw-r--r--4.1.3/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.0.8/4427_force_XATTR_PAX_tmpfs.patch)0
-rw-r--r--4.1.3/4430_grsec-remove-localversion-grsec.patch (renamed from 4.0.8/4430_grsec-remove-localversion-grsec.patch)0
-rw-r--r--4.1.3/4435_grsec-mute-warnings.patch (renamed from 4.0.8/4435_grsec-mute-warnings.patch)0
-rw-r--r--4.1.3/4440_grsec-remove-protected-paths.patch (renamed from 4.0.8/4440_grsec-remove-protected-paths.patch)0
-rw-r--r--4.1.3/4450_grsec-kconfig-default-gids.patch (renamed from 4.0.8/4450_grsec-kconfig-default-gids.patch)0
-rw-r--r--4.1.3/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.0.8/4465_selinux-avc_audit-log-curr_ip.patch)0
-rw-r--r--4.1.3/4470_disable-compat_vdso.patch (renamed from 4.0.8/4470_disable-compat_vdso.patch)0
-rw-r--r--4.1.3/4475_emutramp_default_on.patch (renamed from 4.0.8/4475_emutramp_default_on.patch)0
17 files changed, 13081 insertions, 11070 deletions
diff --git a/3.14.48/0000_README b/3.14.48/0000_README
index 44ff3ab..e4ab7cb 100644
--- a/3.14.48/0000_README
+++ b/3.14.48/0000_README
@@ -2,15 +2,11 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1046_linux-3.14.47.patch
-From: http://www.kernel.org
-Desc: Linux 3.14.47
-
Patch: 1047_linux-3.14.48.patch
From: http://www.kernel.org
Desc: Linux 3.14.48
-Patch: 4420_grsecurity-3.1-3.14.48-201507111210.patch
+Patch: 4420_grsecurity-3.1-3.14.48-201507251417.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.48/1046_linux-3.14.47.patch b/3.14.48/1046_linux-3.14.47.patch
deleted file mode 100644
index 4dc0c5a..0000000
--- a/3.14.48/1046_linux-3.14.47.patch
+++ /dev/null
@@ -1,1395 +0,0 @@
-diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
-index 6cd63a9..bc6d617 100644
---- a/Documentation/virtual/kvm/api.txt
-+++ b/Documentation/virtual/kvm/api.txt
-@@ -2344,7 +2344,8 @@ should be created before this ioctl is invoked.
-
- Possible features:
- - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
-- Depends on KVM_CAP_ARM_PSCI.
-+ Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
-+ and execute guest code when KVM_RUN is called.
- - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
- Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-
-diff --git a/Makefile b/Makefile
-index def39fd..f9041e6 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 3
- PATCHLEVEL = 14
--SUBLEVEL = 46
-+SUBLEVEL = 47
- EXTRAVERSION =
- NAME = Remembering Coco
-
-diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
-index 0fa90c9..853e2be 100644
---- a/arch/arm/include/asm/kvm_emulate.h
-+++ b/arch/arm/include/asm/kvm_emulate.h
-@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
- void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
- void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
-
-+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-+{
-+ vcpu->arch.hcr = HCR_GUEST_MASK;
-+}
-+
- static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
- {
- return 1;
-diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
-index 0cbdb8e..9f79231 100644
---- a/arch/arm/include/asm/kvm_mmu.h
-+++ b/arch/arm/include/asm/kvm_mmu.h
-@@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
- void free_boot_hyp_pgd(void);
- void free_hyp_pgds(void);
-
-+void stage2_unmap_vm(struct kvm *kvm);
- int kvm_alloc_stage2_pgd(struct kvm *kvm);
- void kvm_free_stage2_pgd(struct kvm *kvm);
- int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-@@ -78,17 +79,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
- flush_pmd_entry(pte);
- }
-
--static inline bool kvm_is_write_fault(unsigned long hsr)
--{
-- unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
-- if (hsr_ec == HSR_EC_IABT)
-- return false;
-- else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
-- return false;
-- else
-- return true;
--}
--
- static inline void kvm_clean_pgd(pgd_t *pgd)
- {
- clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index df6e75e..2e74a61 100644
---- a/arch/arm/kvm/arm.c
-+++ b/arch/arm/kvm/arm.c
-@@ -220,6 +220,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
- int err;
- struct kvm_vcpu *vcpu;
-
-+ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
-+ err = -EBUSY;
-+ goto out;
-+ }
-+
- vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
- if (!vcpu) {
- err = -ENOMEM;
-@@ -427,9 +432,9 @@ static void update_vttbr(struct kvm *kvm)
-
- /* update vttbr to be used with the new vmid */
- pgd_phys = virt_to_phys(kvm->arch.pgd);
-+ BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
- vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
-- kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
-- kvm->arch.vttbr |= vmid;
-+ kvm->arch.vttbr = pgd_phys | vmid;
-
- spin_unlock(&kvm_vmid_lock);
- }
-@@ -676,10 +681,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
- return ret;
-
- /*
-+ * Ensure a rebooted VM will fault in RAM pages and detect if the
-+ * guest MMU is turned off and flush the caches as needed.
-+ */
-+ if (vcpu->arch.has_run_once)
-+ stage2_unmap_vm(vcpu->kvm);
-+
-+ vcpu_reset_hcr(vcpu);
-+
-+ /*
- * Handle the "start in power-off" case by marking the VCPU as paused.
- */
-- if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
-+ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
- vcpu->arch.pause = true;
-+ else
-+ vcpu->arch.pause = false;
-
- return 0;
- }
-@@ -825,7 +841,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self,
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
-- cpu_init_hyp_mode(NULL);
-+ if (__hyp_get_vectors() == hyp_default_vectors)
-+ cpu_init_hyp_mode(NULL);
- break;
- }
-
-diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
-index b23a59c..2786eae 100644
---- a/arch/arm/kvm/guest.c
-+++ b/arch/arm/kvm/guest.c
-@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
-
- int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
- {
-- vcpu->arch.hcr = HCR_GUEST_MASK;
- return 0;
- }
-
-diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
-index 70ed2c1..524b4b5 100644
---- a/arch/arm/kvm/mmu.c
-+++ b/arch/arm/kvm/mmu.c
-@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
- pgd = pgdp + pgd_index(addr);
- do {
- next = kvm_pgd_addr_end(addr, end);
-- unmap_puds(kvm, pgd, addr, next);
-+ if (!pgd_none(*pgd))
-+ unmap_puds(kvm, pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
- }
-
-@@ -555,6 +556,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
- unmap_range(kvm, kvm->arch.pgd, start, size);
- }
-
-+static void stage2_unmap_memslot(struct kvm *kvm,
-+ struct kvm_memory_slot *memslot)
-+{
-+ hva_t hva = memslot->userspace_addr;
-+ phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
-+ phys_addr_t size = PAGE_SIZE * memslot->npages;
-+ hva_t reg_end = hva + size;
-+
-+ /*
-+ * A memory region could potentially cover multiple VMAs, and any holes
-+ * between them, so iterate over all of them to find out if we should
-+ * unmap any of them.
-+ *
-+ * +--------------------------------------------+
-+ * +---------------+----------------+ +----------------+
-+ * | : VMA 1 | VMA 2 | | VMA 3 : |
-+ * +---------------+----------------+ +----------------+
-+ * | memory region |
-+ * +--------------------------------------------+
-+ */
-+ do {
-+ struct vm_area_struct *vma = find_vma(current->mm, hva);
-+ hva_t vm_start, vm_end;
-+
-+ if (!vma || vma->vm_start >= reg_end)
-+ break;
-+
-+ /*
-+ * Take the intersection of this VMA with the memory region
-+ */
-+ vm_start = max(hva, vma->vm_start);
-+ vm_end = min(reg_end, vma->vm_end);
-+
-+ if (!(vma->vm_flags & VM_PFNMAP)) {
-+ gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-+ unmap_stage2_range(kvm, gpa, vm_end - vm_start);
-+ }
-+ hva = vm_end;
-+ } while (hva < reg_end);
-+}
-+
-+/**
-+ * stage2_unmap_vm - Unmap Stage-2 RAM mappings
-+ * @kvm: The struct kvm pointer
-+ *
-+ * Go through the memregions and unmap any reguler RAM
-+ * backing memory already mapped to the VM.
-+ */
-+void stage2_unmap_vm(struct kvm *kvm)
-+{
-+ struct kvm_memslots *slots;
-+ struct kvm_memory_slot *memslot;
-+ int idx;
-+
-+ idx = srcu_read_lock(&kvm->srcu);
-+ spin_lock(&kvm->mmu_lock);
-+
-+ slots = kvm_memslots(kvm);
-+ kvm_for_each_memslot(memslot, slots)
-+ stage2_unmap_memslot(kvm, memslot);
-+
-+ spin_unlock(&kvm->mmu_lock);
-+ srcu_read_unlock(&kvm->srcu, idx);
-+}
-+
- /**
- * kvm_free_stage2_pgd - free all stage-2 tables
- * @kvm: The KVM struct pointer for the VM.
-@@ -746,6 +812,19 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
- return false;
- }
-
-+static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-+{
-+ if (kvm_vcpu_trap_is_iabt(vcpu))
-+ return false;
-+
-+ return kvm_vcpu_dabt_iswrite(vcpu);
-+}
-+
-+static bool kvm_is_device_pfn(unsigned long pfn)
-+{
-+ return !pfn_valid(pfn);
-+}
-+
- static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_memory_slot *memslot,
- unsigned long fault_status)
-@@ -761,7 +840,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- pfn_t pfn;
- pgprot_t mem_type = PAGE_S2;
-
-- write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
-+ write_fault = kvm_is_write_fault(vcpu);
- if (fault_status == FSC_PERM && !write_fault) {
- kvm_err("Unexpected L2 read permission error\n");
- return -EFAULT;
-@@ -770,6 +849,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- /* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
- vma = find_vma_intersection(current->mm, hva, hva + 1);
-+ if (unlikely(!vma)) {
-+ kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-+ up_read(&current->mm->mmap_sem);
-+ return -EFAULT;
-+ }
-+
- if (is_vm_hugetlb_page(vma)) {
- hugetlb = true;
- gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
-@@ -810,7 +895,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- if (is_error_pfn(pfn))
- return -EFAULT;
-
-- if (kvm_is_mmio_pfn(pfn))
-+ if (kvm_is_device_pfn(pfn))
- mem_type = PAGE_S2_DEVICE;
-
- spin_lock(&kvm->mmu_lock);
-@@ -836,7 +921,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- }
- coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
- ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
-- mem_type == PAGE_S2_DEVICE);
-+ pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
- }
-
-
-@@ -912,6 +997,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
-
- memslot = gfn_to_memslot(vcpu->kvm, gfn);
-
-+ /* Userspace should not be able to register out-of-bounds IPAs */
-+ VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
-+
- ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
- if (ret == 0)
- ret = 1;
-@@ -1136,6 +1224,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
- {
-+ /*
-+ * Prevent userspace from creating a memory region outside of the IPA
-+ * space addressable by the KVM guest IPA space.
-+ */
-+ if (memslot->base_gfn + memslot->npages >=
-+ (KVM_PHYS_SIZE >> PAGE_SHIFT))
-+ return -EFAULT;
-+
- return 0;
- }
-
-diff --git a/arch/arm/mach-dove/board-dt.c b/arch/arm/mach-dove/board-dt.c
-index 49fa9ab..7a7a09a5 100644
---- a/arch/arm/mach-dove/board-dt.c
-+++ b/arch/arm/mach-dove/board-dt.c
-@@ -26,7 +26,7 @@ static void __init dove_dt_init(void)
- #ifdef CONFIG_CACHE_TAUROS2
- tauros2_init(0);
- #endif
-- BUG_ON(mvebu_mbus_dt_init());
-+ BUG_ON(mvebu_mbus_dt_init(false));
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- }
-
-diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
-index 01a5765..b509556 100644
---- a/arch/arm/mach-imx/clk-imx6q.c
-+++ b/arch/arm/mach-imx/clk-imx6q.c
-@@ -406,7 +406,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
- clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
- clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
- clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
-- clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
-+ clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
- clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
- clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
- clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
-diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
-index 7818815..79e629d 100644
---- a/arch/arm/mach-kirkwood/board-dt.c
-+++ b/arch/arm/mach-kirkwood/board-dt.c
-@@ -116,7 +116,7 @@ static void __init kirkwood_dt_init(void)
- */
- writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
-
-- BUG_ON(mvebu_mbus_dt_init());
-+ BUG_ON(mvebu_mbus_dt_init(false));
-
- kirkwood_l2_init();
-
-diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
-index f6c9d1d..79c3766a 100644
---- a/arch/arm/mach-mvebu/armada-370-xp.c
-+++ b/arch/arm/mach-mvebu/armada-370-xp.c
-@@ -41,7 +41,7 @@ static void __init armada_370_xp_timer_and_clk_init(void)
- of_clk_init(NULL);
- clocksource_of_init();
- coherency_init();
-- BUG_ON(mvebu_mbus_dt_init());
-+ BUG_ON(mvebu_mbus_dt_init(coherency_available()));
- #ifdef CONFIG_CACHE_L2X0
- l2x0_of_init(0, ~0UL);
- #endif
-diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
-index c295c10..49bad4d 100644
---- a/arch/arm/mach-mvebu/coherency.c
-+++ b/arch/arm/mach-mvebu/coherency.c
-@@ -121,6 +121,20 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
- .notifier_call = mvebu_hwcc_platform_notifier,
- };
-
-+/*
-+ * Keep track of whether we have IO hardware coherency enabled or not.
-+ * On Armada 370's we will not be using it for example. We need to make
-+ * that available [through coherency_available()] so the mbus controller
-+ * doesn't enable the IO coherency bit in the attribute bits of the
-+ * chip selects.
-+ */
-+static int coherency_enabled;
-+
-+int coherency_available(void)
-+{
-+ return coherency_enabled;
-+}
-+
- int __init coherency_init(void)
- {
- struct device_node *np;
-@@ -164,6 +178,7 @@ int __init coherency_init(void)
- coherency_base = of_iomap(np, 0);
- coherency_cpu_base = of_iomap(np, 1);
- set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
-+ coherency_enabled = 1;
- of_node_put(np);
- }
-
-diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
-index 760226c..63e18c6 100644
---- a/arch/arm/mach-mvebu/coherency.h
-+++ b/arch/arm/mach-mvebu/coherency.h
-@@ -17,6 +17,7 @@
- extern unsigned long coherency_phys_base;
-
- int set_cpu_coherent(unsigned int cpu_id, int smp_group_id);
-+int coherency_available(void);
- int coherency_init(void);
-
- #endif /* __MACH_370_XP_COHERENCY_H */
-diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
-index 00fbaa7..ea68925 100644
---- a/arch/arm64/include/asm/kvm_arm.h
-+++ b/arch/arm64/include/asm/kvm_arm.h
-@@ -18,6 +18,7 @@
- #ifndef __ARM64_KVM_ARM_H__
- #define __ARM64_KVM_ARM_H__
-
-+#include <asm/memory.h>
- #include <asm/types.h>
-
- /* Hyp Configuration Register (HCR) bits */
-@@ -122,6 +123,17 @@
- #define VTCR_EL2_T0SZ_MASK 0x3f
- #define VTCR_EL2_T0SZ_40B 24
-
-+/*
-+ * We configure the Stage-2 page tables to always restrict the IPA space to be
-+ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
-+ * not known to exist and will break with this configuration.
-+ *
-+ * Note that when using 4K pages, we concatenate two first level page tables
-+ * together.
-+ *
-+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
-+ * D4-23 and D4-25 in ARM DDI 0487A.b.
-+ */
- #ifdef CONFIG_ARM64_64K_PAGES
- /*
- * Stage2 translation configuration:
-@@ -151,9 +163,9 @@
- #endif
-
- #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
--#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
--#define VTTBR_VMID_SHIFT (48LLU)
--#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
-+#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-+#define VTTBR_VMID_SHIFT (UL(48))
-+#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
-
- /* Hyp System Trap Register */
- #define HSTR_EL2_TTEE (1 << 16)
-@@ -176,13 +188,13 @@
-
- /* Exception Syndrome Register (ESR) bits */
- #define ESR_EL2_EC_SHIFT (26)
--#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
--#define ESR_EL2_IL (1U << 25)
-+#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
-+#define ESR_EL2_IL (UL(1) << 25)
- #define ESR_EL2_ISS (ESR_EL2_IL - 1)
- #define ESR_EL2_ISV_SHIFT (24)
--#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
-+#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
- #define ESR_EL2_SAS_SHIFT (22)
--#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
-+#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
- #define ESR_EL2_SSE (1 << 21)
- #define ESR_EL2_SRT_SHIFT (16)
- #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
-@@ -196,16 +208,16 @@
- #define ESR_EL2_FSC_TYPE (0x3c)
-
- #define ESR_EL2_CV_SHIFT (24)
--#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
-+#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
- #define ESR_EL2_COND_SHIFT (20)
--#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
-+#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
-
-
- #define FSC_FAULT (0x04)
- #define FSC_PERM (0x0c)
-
- /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
--#define HPFAR_MASK (~0xFUL)
-+#define HPFAR_MASK (~UL(0xf))
-
- #define ESR_EL2_EC_UNKNOWN (0x00)
- #define ESR_EL2_EC_WFI (0x01)
-diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
-index dd8ecfc3..681cb90 100644
---- a/arch/arm64/include/asm/kvm_emulate.h
-+++ b/arch/arm64/include/asm/kvm_emulate.h
-@@ -38,6 +38,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
- void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
- void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
-
-+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-+{
-+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
-+}
-+
- static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
- {
- return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
-index 8e138c7..0d51874 100644
---- a/arch/arm64/include/asm/kvm_mmu.h
-+++ b/arch/arm64/include/asm/kvm_mmu.h
-@@ -59,10 +59,9 @@
- #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
-
- /*
-- * Align KVM with the kernel's view of physical memory. Should be
-- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
-+ * We currently only support a 40bit IPA.
- */
--#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
-+#define KVM_PHYS_SHIFT (40)
- #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
- #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
-
-@@ -75,6 +74,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
- void free_boot_hyp_pgd(void);
- void free_hyp_pgds(void);
-
-+void stage2_unmap_vm(struct kvm *kvm);
- int kvm_alloc_stage2_pgd(struct kvm *kvm);
- void kvm_free_stage2_pgd(struct kvm *kvm);
- int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-@@ -93,19 +93,6 @@ void kvm_clear_hyp_idmap(void);
- #define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
- #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
-
--static inline bool kvm_is_write_fault(unsigned long esr)
--{
-- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
--
-- if (esr_ec == ESR_EL2_EC_IABT)
-- return false;
--
-- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
-- return false;
--
-- return true;
--}
--
- static inline void kvm_clean_pgd(pgd_t *pgd) {}
- static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
- static inline void kvm_clean_pte(pte_t *pte) {}
-diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
-index 0874557..a8d81fa 100644
---- a/arch/arm64/kvm/guest.c
-+++ b/arch/arm64/kvm/guest.c
-@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
-
- int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
- {
-- vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
- return 0;
- }
-
-diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
-index 3974881..b76159a 100644
---- a/arch/arm64/mm/dma-mapping.c
-+++ b/arch/arm64/mm/dma-mapping.c
-@@ -54,8 +54,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
-
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- addr = page_address(page);
-- if (flags & __GFP_ZERO)
-- memset(addr, 0, size);
-+ memset(addr, 0, size);
- return addr;
- } else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 2f645c9..5dab54a 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -160,7 +160,7 @@ config SBUS
-
- config NEED_DMA_MAP_STATE
- def_bool y
-- depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
-+ depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
-
- config NEED_SG_DMA_LENGTH
- def_bool y
-diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
-index 18f7391..43a07bf 100644
---- a/arch/x86/kernel/cpu/microcode/intel_early.c
-+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
-@@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
- unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
- int i;
-
-- while (leftover) {
-+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
- mc_header = (struct microcode_header_intel *)ucode_ptr;
-
- mc_size = get_totalsize(mc_header);
-diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
-index a1f5b18..490fee1 100644
---- a/arch/x86/kernel/kprobes/core.c
-+++ b/arch/x86/kernel/kprobes/core.c
-@@ -326,13 +326,16 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
- {
- struct insn insn;
- kprobe_opcode_t buf[MAX_INSN_SIZE];
-+ int length;
-
- kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
- insn_get_length(&insn);
-+ length = insn.length;
-+
- /* Another subsystem puts a breakpoint, failed to recover */
- if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
- return 0;
-- memcpy(dest, insn.kaddr, insn.length);
-+ memcpy(dest, insn.kaddr, length);
-
- #ifdef CONFIG_X86_64
- if (insn_rip_relative(&insn)) {
-@@ -362,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
- *(s32 *) disp = (s32) newdisp;
- }
- #endif
-- return insn.length;
-+ return length;
- }
-
- static int __kprobes arch_copy_kprobe(struct kprobe *p)
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 9643eda6..0746334 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
- {
- struct vcpu_svm *svm = to_svm(vcpu);
-
-- if (svm->vmcb->control.next_rip != 0)
-+ if (svm->vmcb->control.next_rip != 0) {
-+ WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
- svm->next_rip = svm->vmcb->control.next_rip;
-+ }
-
- if (!svm->next_rip) {
- if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
-@@ -4246,7 +4248,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
- break;
- }
-
-- vmcb->control.next_rip = info->next_rip;
-+ /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
-+ if (static_cpu_has(X86_FEATURE_NRIPS))
-+ vmcb->control.next_rip = info->next_rip;
- vmcb->control.exit_code = icpt_info.exit_code;
- vmexit = nested_svm_exit_handled(svm);
-
-diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
-index e990dee..1aa0130 100644
---- a/drivers/bus/mvebu-mbus.c
-+++ b/drivers/bus/mvebu-mbus.c
-@@ -701,7 +701,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
- phys_addr_t sdramwins_phys_base,
- size_t sdramwins_size)
- {
-- struct device_node *np;
- int win;
-
- mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
-@@ -714,12 +713,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
- return -ENOMEM;
- }
-
-- np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
-- if (np) {
-- mbus->hw_io_coherency = 1;
-- of_node_put(np);
-- }
--
- for (win = 0; win < mbus->soc->num_wins; win++)
- mvebu_mbus_disable_window(mbus, win);
-
-@@ -889,7 +882,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
- }
- }
-
--int __init mvebu_mbus_dt_init(void)
-+int __init mvebu_mbus_dt_init(bool is_coherent)
- {
- struct resource mbuswins_res, sdramwins_res;
- struct device_node *np, *controller;
-@@ -928,6 +921,8 @@ int __init mvebu_mbus_dt_init(void)
- return -EINVAL;
- }
-
-+ mbus_state.hw_io_coherency = is_coherent;
-+
- /* Get optional pcie-{mem,io}-aperture properties */
- mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
- &mbus_state.pcie_io_aperture);
-diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
-index c611bcc..3e623ab 100644
---- a/drivers/edac/sb_edac.c
-+++ b/drivers/edac/sb_edac.c
-@@ -765,7 +765,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
- u32 reg;
- u64 limit, prv = 0;
- u64 tmp_mb;
-- u32 mb, kb;
-+ u32 gb, mb;
- u32 rir_way;
-
- /*
-@@ -775,15 +775,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
- pvt->tolm = pvt->info.get_tolm(pvt);
- tmp_mb = (1 + pvt->tolm) >> 20;
-
-- mb = div_u64_rem(tmp_mb, 1000, &kb);
-- edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
-+ gb = div_u64_rem(tmp_mb, 1024, &mb);
-+ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
-+ gb, (mb*1000)/1024, (u64)pvt->tolm);
-
- /* Address range is already 45:25 */
- pvt->tohm = pvt->info.get_tohm(pvt);
- tmp_mb = (1 + pvt->tohm) >> 20;
-
-- mb = div_u64_rem(tmp_mb, 1000, &kb);
-- edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
-+ gb = div_u64_rem(tmp_mb, 1024, &mb);
-+ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
-+ gb, (mb*1000)/1024, (u64)pvt->tohm);
-
- /*
- * Step 2) Get SAD range and SAD Interleave list
-@@ -805,11 +807,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
- break;
-
- tmp_mb = (limit + 1) >> 20;
-- mb = div_u64_rem(tmp_mb, 1000, &kb);
-+ gb = div_u64_rem(tmp_mb, 1024, &mb);
- edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
- n_sads,
- get_dram_attr(reg),
-- mb, kb,
-+ gb, (mb*1000)/1024,
- ((u64)tmp_mb) << 20L,
- INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
- reg);
-@@ -840,9 +842,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
- break;
- tmp_mb = (limit + 1) >> 20;
-
-- mb = div_u64_rem(tmp_mb, 1000, &kb);
-+ gb = div_u64_rem(tmp_mb, 1024, &mb);
- edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
-- n_tads, mb, kb,
-+ n_tads, gb, (mb*1000)/1024,
- ((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
-@@ -865,10 +867,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
- tad_ch_nilv_offset[j],
- &reg);
- tmp_mb = TAD_OFFSET(reg) >> 20;
-- mb = div_u64_rem(tmp_mb, 1000, &kb);
-+ gb = div_u64_rem(tmp_mb, 1024, &mb);
- edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
- i, j,
-- mb, kb,
-+ gb, (mb*1000)/1024,
- ((u64)tmp_mb) << 20L,
- reg);
- }
-@@ -890,10 +892,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
-
- tmp_mb = RIR_LIMIT(reg) >> 20;
- rir_way = 1 << RIR_WAY(reg);
-- mb = div_u64_rem(tmp_mb, 1000, &kb);
-+ gb = div_u64_rem(tmp_mb, 1024, &mb);
- edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
- i, j,
-- mb, kb,
-+ gb, (mb*1000)/1024,
- ((u64)tmp_mb) << 20L,
- rir_way,
- reg);
-@@ -904,10 +906,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
- &reg);
- tmp_mb = RIR_OFFSET(reg) << 6;
-
-- mb = div_u64_rem(tmp_mb, 1000, &kb);
-+ gb = div_u64_rem(tmp_mb, 1024, &mb);
- edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
- i, j, k,
-- mb, kb,
-+ gb, (mb*1000)/1024,
- ((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
- reg);
-@@ -945,7 +947,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
- u8 ch_way, sck_way, pkg, sad_ha = 0;
- u32 tad_offset;
- u32 rir_way;
-- u32 mb, kb;
-+ u32 mb, gb;
- u64 ch_addr, offset, limit = 0, prv = 0;
-
-
-@@ -1183,10 +1185,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
- continue;
-
- limit = RIR_LIMIT(reg);
-- mb = div_u64_rem(limit >> 20, 1000, &kb);
-+ gb = div_u64_rem(limit >> 20, 1024, &mb);
- edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
- n_rir,
-- mb, kb,
-+ gb, (mb*1000)/1024,
- limit,
- 1 << RIR_WAY(reg));
- if (ch_addr <= limit)
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-index 019a04a..a467261 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-@@ -810,8 +810,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
- tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
- tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
- if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
-- MLX4_WQE_CTRL_TCP_UDP_CSUM);
-+ if (!skb->encapsulation)
-+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
-+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
-+ else
-+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
- ring->tx_csum++;
- }
-
-diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
-index 528bff5..85d370e 100644
---- a/drivers/scsi/hpsa.c
-+++ b/drivers/scsi/hpsa.c
-@@ -3984,10 +3984,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
-
- /* Save the PCI command register */
- pci_read_config_word(pdev, 4, &command_register);
-- /* Turn the board off. This is so that later pci_restore_state()
-- * won't turn the board on before the rest of config space is ready.
-- */
-- pci_disable_device(pdev);
- pci_save_state(pdev);
-
- /* find the first memory BAR, so we can find the cfg table */
-@@ -4035,11 +4031,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
- goto unmap_cfgtable;
-
- pci_restore_state(pdev);
-- rc = pci_enable_device(pdev);
-- if (rc) {
-- dev_warn(&pdev->dev, "failed to enable device.\n");
-- goto unmap_cfgtable;
-- }
- pci_write_config_word(pdev, 4, command_register);
-
- /* Some devices (notably the HP Smart Array 5i Controller)
-@@ -4525,6 +4516,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
- if (!reset_devices)
- return 0;
-
-+ /* kdump kernel is loading, we don't know in which state is
-+ * the pci interface. The dev->enable_cnt is equal zero
-+ * so we call enable+disable, wait a while and switch it on.
-+ */
-+ rc = pci_enable_device(pdev);
-+ if (rc) {
-+ dev_warn(&pdev->dev, "Failed to enable PCI device\n");
-+ return -ENODEV;
-+ }
-+ pci_disable_device(pdev);
-+ msleep(260); /* a randomly chosen number */
-+ rc = pci_enable_device(pdev);
-+ if (rc) {
-+ dev_warn(&pdev->dev, "failed to enable device.\n");
-+ return -ENODEV;
-+ }
-+ pci_set_master(pdev);
- /* Reset the controller with a PCI power-cycle or via doorbell */
- rc = hpsa_kdump_hard_reset_controller(pdev);
-
-@@ -4533,10 +4541,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
- * "performant mode". Or, it might be 640x, which can't reset
- * due to concerns about shared bbwc between 6402/6404 pair.
- */
-- if (rc == -ENOTSUPP)
-- return rc; /* just try to do the kdump anyhow. */
-- if (rc)
-- return -ENODEV;
-+ if (rc) {
-+ if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
-+ rc = -ENODEV;
-+ goto out_disable;
-+ }
-
- /* Now try to get the controller to respond to a no-op */
- dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
-@@ -4547,7 +4556,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
- dev_warn(&pdev->dev, "no-op failed%s\n",
- (i < 11 ? "; re-trying" : ""));
- }
-- return 0;
-+
-+out_disable:
-+
-+ pci_disable_device(pdev);
-+ return rc;
- }
-
- static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
-@@ -4690,6 +4703,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
- iounmap(h->transtable);
- if (h->cfgtable)
- iounmap(h->cfgtable);
-+ pci_disable_device(h->pdev);
- pci_release_regions(h->pdev);
- kfree(h);
- }
-diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
-index 93de3ba..f8ffee4 100644
---- a/fs/btrfs/ctree.c
-+++ b/fs/btrfs/ctree.c
-@@ -2963,7 +2963,7 @@ done:
- */
- if (!p->leave_spinning)
- btrfs_set_path_blocking(p);
-- if (ret < 0)
-+ if (ret < 0 && !p->skip_release_on_error)
- btrfs_release_path(p);
- return ret;
- }
-diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
-index d3511cc..3b39eb4 100644
---- a/fs/btrfs/ctree.h
-+++ b/fs/btrfs/ctree.h
-@@ -608,6 +608,7 @@ struct btrfs_path {
- unsigned int skip_locking:1;
- unsigned int leave_spinning:1;
- unsigned int search_commit_root:1;
-+ unsigned int skip_release_on_error:1;
- };
-
- /*
-@@ -3609,6 +3610,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
- int verify_dir_item(struct btrfs_root *root,
- struct extent_buffer *leaf,
- struct btrfs_dir_item *dir_item);
-+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
-+ struct btrfs_path *path,
-+ const char *name,
-+ int name_len);
-
- /* orphan.c */
- int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
-diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
-index a0691df..9521a93 100644
---- a/fs/btrfs/dir-item.c
-+++ b/fs/btrfs/dir-item.c
-@@ -21,10 +21,6 @@
- #include "hash.h"
- #include "transaction.h"
-
--static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
-- struct btrfs_path *path,
-- const char *name, int name_len);
--
- /*
- * insert a name into a directory, doing overflow properly if there is a hash
- * collision. data_size indicates how big the item inserted should be. On
-@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
- * this walks through all the entries in a dir item and finds one
- * for a specific name.
- */
--static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
-- struct btrfs_path *path,
-- const char *name, int name_len)
-+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
-+ struct btrfs_path *path,
-+ const char *name, int name_len)
- {
- struct btrfs_dir_item *dir_item;
- unsigned long name_ptr;
-diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
-index 488e987..618e86c 100644
---- a/fs/btrfs/xattr.c
-+++ b/fs/btrfs/xattr.c
-@@ -29,6 +29,7 @@
- #include "xattr.h"
- #include "disk-io.h"
- #include "props.h"
-+#include "locking.h"
-
-
- ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
-@@ -91,7 +92,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
- struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
- {
-- struct btrfs_dir_item *di;
-+ struct btrfs_dir_item *di = NULL;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_path *path;
- size_t name_len = strlen(name);
-@@ -103,84 +104,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-+ path->skip_release_on_error = 1;
-+
-+ if (!value) {
-+ di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
-+ name, name_len, -1);
-+ if (!di && (flags & XATTR_REPLACE))
-+ ret = -ENODATA;
-+ else if (di)
-+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
-+ goto out;
-+ }
-
-+ /*
-+ * For a replace we can't just do the insert blindly.
-+ * Do a lookup first (read-only btrfs_search_slot), and return if xattr
-+ * doesn't exist. If it exists, fall down below to the insert/replace
-+ * path - we can't race with a concurrent xattr delete, because the VFS
-+ * locks the inode's i_mutex before calling setxattr or removexattr.
-+ */
- if (flags & XATTR_REPLACE) {
-- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
-- name_len, -1);
-- if (IS_ERR(di)) {
-- ret = PTR_ERR(di);
-- goto out;
-- } else if (!di) {
-+ ASSERT(mutex_is_locked(&inode->i_mutex));
-+ di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
-+ name, name_len, 0);
-+ if (!di) {
- ret = -ENODATA;
- goto out;
- }
-- ret = btrfs_delete_one_dir_name(trans, root, path, di);
-- if (ret)
-- goto out;
- btrfs_release_path(path);
-+ di = NULL;
-+ }
-
-+ ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
-+ name, name_len, value, size);
-+ if (ret == -EOVERFLOW) {
- /*
-- * remove the attribute
-+ * We have an existing item in a leaf, split_leaf couldn't
-+ * expand it. That item might have or not a dir_item that
-+ * matches our target xattr, so lets check.
- */
-- if (!value)
-- goto out;
-- } else {
-- di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
-- name, name_len, 0);
-- if (IS_ERR(di)) {
-- ret = PTR_ERR(di);
-+ ret = 0;
-+ btrfs_assert_tree_locked(path->nodes[0]);
-+ di = btrfs_match_dir_item_name(root, path, name, name_len);
-+ if (!di && !(flags & XATTR_REPLACE)) {
-+ ret = -ENOSPC;
- goto out;
- }
-- if (!di && !value)
-- goto out;
-- btrfs_release_path(path);
-+ } else if (ret == -EEXIST) {
-+ ret = 0;
-+ di = btrfs_match_dir_item_name(root, path, name, name_len);
-+ ASSERT(di); /* logic error */
-+ } else if (ret) {
-+ goto out;
- }
-
--again:
-- ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
-- name, name_len, value, size);
-- /*
-- * If we're setting an xattr to a new value but the new value is say
-- * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
-- * back from split_leaf. This is because it thinks we'll be extending
-- * the existing item size, but we're asking for enough space to add the
-- * item itself. So if we get EOVERFLOW just set ret to EEXIST and let
-- * the rest of the function figure it out.
-- */
-- if (ret == -EOVERFLOW)
-+ if (di && (flags & XATTR_CREATE)) {
- ret = -EEXIST;
-+ goto out;
-+ }
-
-- if (ret == -EEXIST) {
-- if (flags & XATTR_CREATE)
-- goto out;
-+ if (di) {
- /*
-- * We can't use the path we already have since we won't have the
-- * proper locking for a delete, so release the path and
-- * re-lookup to delete the thing.
-+ * We're doing a replace, and it must be atomic, that is, at
-+ * any point in time we have either the old or the new xattr
-+ * value in the tree. We don't want readers (getxattr and
-+ * listxattrs) to miss a value, this is specially important
-+ * for ACLs.
- */
-- btrfs_release_path(path);
-- di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
-- name, name_len, -1);
-- if (IS_ERR(di)) {
-- ret = PTR_ERR(di);
-- goto out;
-- } else if (!di) {
-- /* Shouldn't happen but just in case... */
-- btrfs_release_path(path);
-- goto again;
-+ const int slot = path->slots[0];
-+ struct extent_buffer *leaf = path->nodes[0];
-+ const u16 old_data_len = btrfs_dir_data_len(leaf, di);
-+ const u32 item_size = btrfs_item_size_nr(leaf, slot);
-+ const u32 data_size = sizeof(*di) + name_len + size;
-+ struct btrfs_item *item;
-+ unsigned long data_ptr;
-+ char *ptr;
-+
-+ if (size > old_data_len) {
-+ if (btrfs_leaf_free_space(root, leaf) <
-+ (size - old_data_len)) {
-+ ret = -ENOSPC;
-+ goto out;
-+ }
- }
-
-- ret = btrfs_delete_one_dir_name(trans, root, path, di);
-- if (ret)
-- goto out;
-+ if (old_data_len + name_len + sizeof(*di) == item_size) {
-+ /* No other xattrs packed in the same leaf item. */
-+ if (size > old_data_len)
-+ btrfs_extend_item(root, path,
-+ size - old_data_len);
-+ else if (size < old_data_len)
-+ btrfs_truncate_item(root, path, data_size, 1);
-+ } else {
-+ /* There are other xattrs packed in the same item. */
-+ ret = btrfs_delete_one_dir_name(trans, root, path, di);
-+ if (ret)
-+ goto out;
-+ btrfs_extend_item(root, path, data_size);
-+ }
-
-+ item = btrfs_item_nr(slot);
-+ ptr = btrfs_item_ptr(leaf, slot, char);
-+ ptr += btrfs_item_size(leaf, item) - data_size;
-+ di = (struct btrfs_dir_item *)ptr;
-+ btrfs_set_dir_data_len(leaf, di, size);
-+ data_ptr = ((unsigned long)(di + 1)) + name_len;
-+ write_extent_buffer(leaf, value, data_ptr, size);
-+ btrfs_mark_buffer_dirty(leaf);
-+ } else {
- /*
-- * We have a value to set, so go back and try to insert it now.
-+ * Insert, and we had space for the xattr, so path->slots[0] is
-+ * where our xattr dir_item is and btrfs_insert_xattr_item()
-+ * filled it.
- */
-- if (value) {
-- btrfs_release_path(path);
-- goto again;
-- }
- }
- out:
- btrfs_free_path(path);
-diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
-index 7fe30f6..35f54bc 100644
---- a/fs/ocfs2/file.c
-+++ b/fs/ocfs2/file.c
-@@ -2478,9 +2478,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
- struct address_space *mapping = out->f_mapping;
- struct inode *inode = mapping->host;
- struct splice_desc sd = {
-- .total_len = len,
- .flags = flags,
-- .pos = *ppos,
- .u.file = out,
- };
-
-@@ -2490,6 +2488,12 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
- out->f_path.dentry->d_name.len,
- out->f_path.dentry->d_name.name, len);
-
-+ ret = generic_write_checks(out, ppos, &len, 0);
-+ if (ret)
-+ return ret;
-+ sd.total_len = len;
-+ sd.pos = *ppos;
-+
- pipe_lock(pipe);
-
- splice_from_pipe_begin(&sd);
-diff --git a/fs/splice.c b/fs/splice.c
-index 12028fa..f345d53 100644
---- a/fs/splice.c
-+++ b/fs/splice.c
-@@ -1012,13 +1012,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
- struct address_space *mapping = out->f_mapping;
- struct inode *inode = mapping->host;
- struct splice_desc sd = {
-- .total_len = len,
- .flags = flags,
-- .pos = *ppos,
- .u.file = out,
- };
- ssize_t ret;
-
-+ ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
-+ if (ret)
-+ return ret;
-+ sd.total_len = len;
-+ sd.pos = *ppos;
-+
- pipe_lock(pipe);
-
- splice_from_pipe_begin(&sd);
-diff --git a/include/linux/mbus.h b/include/linux/mbus.h
-index 345b8c5..550c88f 100644
---- a/include/linux/mbus.h
-+++ b/include/linux/mbus.h
-@@ -73,6 +73,6 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size);
- int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
- size_t mbus_size, phys_addr_t sdram_phys_base,
- size_t sdram_size);
--int mvebu_mbus_dt_init(void);
-+int mvebu_mbus_dt_init(bool is_coherent);
-
- #endif /* __LINUX_MBUS_H */
-diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
-index c68e5e0..99de240 100644
---- a/net/netfilter/nf_tables_api.c
-+++ b/net/netfilter/nf_tables_api.c
-@@ -855,7 +855,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
-
- if (nla[NFTA_CHAIN_POLICY]) {
- if ((chain != NULL &&
-- !(chain->flags & NFT_BASE_CHAIN)) ||
-+ !(chain->flags & NFT_BASE_CHAIN)))
-+ return -EOPNOTSUPP;
-+
-+ if (chain == NULL &&
- nla[NFTA_CHAIN_HOOK] == NULL)
- return -EOPNOTSUPP;
-
-diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
-index 9e287cb..54330fb 100644
---- a/net/netfilter/nfnetlink_cthelper.c
-+++ b/net/netfilter/nfnetlink_cthelper.c
-@@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
- if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
- return -EINVAL;
-
-+ /* Not all fields are initialized so first zero the tuple */
-+ memset(tuple, 0, sizeof(struct nf_conntrack_tuple));
-+
- tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));
- tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
-
-@@ -86,7 +89,7 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
- static int
- nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
- {
-- const struct nf_conn_help *help = nfct_help(ct);
-+ struct nf_conn_help *help = nfct_help(ct);
-
- if (attr == NULL)
- return -EINVAL;
-@@ -94,7 +97,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
- if (help->helper->data_len == 0)
- return -EINVAL;
-
-- memcpy(&help->data, nla_data(attr), help->helper->data_len);
-+ memcpy(help->data, nla_data(attr), help->helper->data_len);
- return 0;
- }
-
-diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
-index 7350723..9695895 100644
---- a/net/netfilter/nft_compat.c
-+++ b/net/netfilter/nft_compat.c
-@@ -82,6 +82,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
- entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
- break;
- case AF_INET6:
-+ if (proto)
-+ entry->e6.ipv6.flags |= IP6T_F_PROTO;
-+
- entry->e6.ipv6.proto = proto;
- entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
- break;
-@@ -313,6 +316,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
- entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
- break;
- case AF_INET6:
-+ if (proto)
-+ entry->e6.ipv6.flags |= IP6T_F_PROTO;
-+
- entry->e6.ipv6.proto = proto;
- entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
- break;
-diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
-index 1316e55..c324a52 100644
---- a/virt/kvm/arm/vgic.c
-+++ b/virt/kvm/arm/vgic.c
-@@ -674,7 +674,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
-- int min_sgi = (offset & ~0x3) * 4;
-+ int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg = 0;
-@@ -695,7 +695,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- {
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
-- int min_sgi = (offset & ~0x3) * 4;
-+ int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg;
-@@ -1387,7 +1387,8 @@ out:
- int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
- bool level)
- {
-- if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
-+ if (likely(vgic_initialized(kvm)) &&
-+ vgic_update_irq_state(kvm, cpuid, irq_num, level))
- vgic_kick_vcpus(kvm);
-
- return 0;
-@@ -1610,7 +1611,7 @@ out:
-
- int kvm_vgic_create(struct kvm *kvm)
- {
-- int i, vcpu_lock_idx = -1, ret = 0;
-+ int i, vcpu_lock_idx = -1, ret;
- struct kvm_vcpu *vcpu;
-
- mutex_lock(&kvm->lock);
-@@ -1625,6 +1626,7 @@ int kvm_vgic_create(struct kvm *kvm)
- * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
- * that no other VCPUs are run while we create the vgic.
- */
-+ ret = -EBUSY;
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!mutex_trylock(&vcpu->mutex))
- goto out_unlock;
-@@ -1632,11 +1634,10 @@ int kvm_vgic_create(struct kvm *kvm)
- }
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
-- if (vcpu->arch.has_run_once) {
-- ret = -EBUSY;
-+ if (vcpu->arch.has_run_once)
- goto out_unlock;
-- }
- }
-+ ret = 0;
-
- spin_lock_init(&kvm->arch.vgic.lock);
- kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
diff --git a/3.14.48/4420_grsecurity-3.1-3.14.48-201507111210.patch b/3.14.48/4420_grsecurity-3.1-3.14.48-201507251417.patch
index 8faa105..59a0c47 100644
--- a/3.14.48/4420_grsecurity-3.1-3.14.48-201507111210.patch
+++ b/3.14.48/4420_grsecurity-3.1-3.14.48-201507251417.patch
@@ -294,6 +294,39 @@ index 5d91ba1..ef1d374 100644
pcbit= [HW,ISDN]
pcd. [PARIDE]
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 855d9b3..154c500 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -41,6 +41,7 @@ show up in /proc/sys/kernel:
+ - kptr_restrict
+ - kstack_depth_to_print [ X86 only ]
+ - l2cr [ PPC only ]
++- modify_ldt [ X86 only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+ - modules_disabled
+ - msg_next_id [ sysv ipc ]
+@@ -381,6 +382,20 @@ This flag controls the L2 cache of G3 processor boards. If
+
+ ==============================================================
+
++modify_ldt: (X86 only)
++
++Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
++(Local Descriptor Table) may be needed to run a 16-bit or segmented code
++such as Dosemu or Wine. This is done via a system call which is not needed
++to run portable applications, and which can sometimes be abused to exploit
++some weaknesses of the architecture, opening new vulnerabilities.
++
++This sysctl allows one to increase the system's security by disabling the
++system call, or to restore compatibility with specific applications when it
++was already disabled.
++
++==============================================================
++
+ modules_disabled:
+
+ A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
index 25393e8..65e3b07 100644
--- a/Makefile
@@ -4767,6 +4800,105 @@ index f15c22e..d830561 100644
}
}
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index 6adf591..00ad1e9 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -73,32 +73,52 @@ struct jit_ctx {
+
+ int bpf_jit_enable __read_mostly;
+
+-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
++static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
++ unsigned int size)
++{
++ void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
++
++ if (!ptr)
++ return -EFAULT;
++ memcpy(ret, ptr, size);
++ return 0;
++}
++
++static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
+ {
+ u8 ret;
+ int err;
+
+- err = skb_copy_bits(skb, offset, &ret, 1);
++ if (offset < 0)
++ err = call_neg_helper(skb, offset, &ret, 1);
++ else
++ err = skb_copy_bits(skb, offset, &ret, 1);
+
+ return (u64)err << 32 | ret;
+ }
+
+-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
++static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
+ {
+ u16 ret;
+ int err;
+
+- err = skb_copy_bits(skb, offset, &ret, 2);
++ if (offset < 0)
++ err = call_neg_helper(skb, offset, &ret, 2);
++ else
++ err = skb_copy_bits(skb, offset, &ret, 2);
+
+ return (u64)err << 32 | ntohs(ret);
+ }
+
+-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
++static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
+ {
+ u32 ret;
+ int err;
+
+- err = skb_copy_bits(skb, offset, &ret, 4);
++ if (offset < 0)
++ err = call_neg_helper(skb, offset, &ret, 4);
++ else
++ err = skb_copy_bits(skb, offset, &ret, 4);
+
+ return (u64)err << 32 | ntohl(ret);
+ }
+@@ -523,9 +543,6 @@ static int build_body(struct jit_ctx *ctx)
+ case BPF_S_LD_B_ABS:
+ load_order = 0;
+ load:
+- /* the interpreter will deal with the negative K */
+- if ((int)k < 0)
+- return -ENOTSUPP;
+ emit_mov_i(r_off, k, ctx);
+ load_common:
+ ctx->seen |= SEEN_DATA | SEEN_CALL;
+@@ -534,12 +551,24 @@ load_common:
+ emit(ARM_SUB_I(r_scratch, r_skb_hl,
+ 1 << load_order), ctx);
+ emit(ARM_CMP_R(r_scratch, r_off), ctx);
+- condt = ARM_COND_HS;
++ condt = ARM_COND_GE;
+ } else {
+ emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
+ condt = ARM_COND_HI;
+ }
+
++ /*
++ * test for negative offset, only if we are
++ * currently scheduled to take the fast
++ * path. this will update the flags so that
++ * the slowpath instruction are ignored if the
++ * offset is negative.
++ *
++ * for loard_order == 0 the HI condition will
++ * make loads at offset 0 take the slow path too.
++ */
++ _emit(condt, ARM_CMP_I(r_off, 0), ctx);
++
+ _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+ ctx);
+
diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
index 5b217f4..c23f40e 100644
--- a/arch/arm/plat-iop/setup.c
@@ -12396,7 +12528,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 96e743a..7f93c3a 100644
+index 96e743a..ca34a86 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,6 +22,7 @@ config X86_64
@@ -12499,6 +12631,29 @@ index 96e743a..7f93c3a 100644
---help---
Map the 32-bit VDSO to the predictable old-style address too.
+@@ -1914,6 +1921,22 @@ config CMDLINE_OVERRIDE
+ This is used to work around broken boot loaders. This should
+ be set to 'N' under normal conditions.
+
++config DEFAULT_MODIFY_LDT_SYSCALL
++ bool "Allow userspace to modify the LDT by default"
++ default y
++
++ ---help---
++ Modifying the LDT (Local Descriptor Table) may be needed to run a
++ 16-bit or segmented code such as Dosemu or Wine. This is done via
++ a system call which is not needed to run portable applications,
++ and which can sometimes be abused to exploit some weaknesses of
++ the architecture, opening new vulnerabilities.
++
++ For this reason this option allows one to enable or disable the
++ feature at runtime. It is recommended to say 'N' here to leave
++ the system protected, and to enable it at runtime only if needed
++ by setting the sys.kernel.modify_ldt sysctl.
++
+ endmenu
+
+ config ARCH_ENABLE_MEMORY_HOTPLUG
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index f3aaf23..a1d3c49 100644
--- a/arch/x86/Kconfig.cpu
@@ -26013,10 +26168,33 @@ index c2bedae..25e7ab60 100644
.name = "data",
.mode = S_IRUGO,
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
-index c37886d..3f425e3 100644
+index c37886d..f43b63d 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
-@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+@@ -11,6 +11,7 @@
+ #include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/mm.h>
++#include <linux/ratelimit.h>
+ #include <linux/smp.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+@@ -20,6 +21,14 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+
++#ifdef CONFIG_GRKERNSEC
++int sysctl_modify_ldt __read_only = 0;
++#elif defined(CONFIG_DEFAULT_MODIFY_LDT_SYSCALL)
++int sysctl_modify_ldt __read_only = 1;
++#else
++int sysctl_modify_ldt __read_only = 0;
++#endif
++
+ #ifdef CONFIG_SMP
+ static void flush_ldt(void *current_mm)
+ {
+@@ -66,13 +75,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
if (reload) {
#ifdef CONFIG_SMP
preempt_disable();
@@ -26032,7 +26210,7 @@ index c37886d..3f425e3 100644
#endif
}
if (oldsize) {
-@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+@@ -94,7 +103,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
return err;
for (i = 0; i < old->size; i++)
@@ -26041,7 +26219,7 @@ index c37886d..3f425e3 100644
return 0;
}
-@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+@@ -115,6 +124,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
retval = copy_ldt(&mm->context, &old_mm->context);
mutex_unlock(&old_mm->context.lock);
}
@@ -26066,7 +26244,7 @@ index c37886d..3f425e3 100644
return retval;
}
-@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+@@ -229,6 +256,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
}
}
@@ -26080,6 +26258,22 @@ index c37886d..3f425e3 100644
if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
error = -EINVAL;
goto out_unlock;
+@@ -254,6 +288,15 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+ {
+ int ret = -ENOSYS;
+
++ if (!sysctl_modify_ldt) {
++ printk_ratelimited(KERN_INFO
++ "Denied a call to modify_ldt() from %s[%d] (uid: %d)."
++ " Adjust sysctl if this was not an exploit attempt.\n",
++ current->comm, task_pid_nr(current),
++ from_kuid_munged(current_user_ns(), current_uid()));
++ return ret;
++ }
++
+ switch (func) {
+ case 0:
+ ret = read_ldt(ptr, bytecount);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 1667b1d..16492c5 100644
--- a/arch/x86/kernel/machine_kexec_32.c
@@ -48521,6 +48715,74 @@ index dff0977..6df4b1d 100644
adapter->vfinfo[vf].spoofchk_enabled = setting;
regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 96fc7fe..1c776d6 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1441,7 +1441,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ struct mvneta_rx_queue *rxq)
+ {
+ struct net_device *dev = pp->dev;
+- int rx_done, rx_filled;
++ int rx_done;
+ u32 rcvd_pkts = 0;
+ u32 rcvd_bytes = 0;
+
+@@ -1452,7 +1452,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ rx_todo = rx_done;
+
+ rx_done = 0;
+- rx_filled = 0;
+
+ /* Fairness NAPI loop */
+ while (rx_done < rx_todo) {
+@@ -1463,7 +1462,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ int rx_bytes, err;
+
+ rx_done++;
+- rx_filled++;
+ rx_status = rx_desc->status;
+ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ data = (unsigned char *)rx_desc->buf_cookie;
+@@ -1503,6 +1501,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ continue;
+ }
+
++ /* Refill processing */
++ err = mvneta_rx_refill(pp, rx_desc);
++ if (err) {
++ netdev_err(dev, "Linux processing - Can't refill\n");
++ rxq->missed++;
++ goto err_drop_frame;
++ }
++
+ skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+ if (!skb)
+ goto err_drop_frame;
+@@ -1522,14 +1528,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ mvneta_rx_csum(pp, rx_status, skb);
+
+ napi_gro_receive(&pp->napi, skb);
+-
+- /* Refill processing */
+- err = mvneta_rx_refill(pp, rx_desc);
+- if (err) {
+- netdev_err(dev, "Linux processing - Can't refill\n");
+- rxq->missed++;
+- rx_filled--;
+- }
+ }
+
+ if (rcvd_pkts) {
+@@ -1542,7 +1540,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
+ }
+
+ /* Update rxq management counters */
+- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
++ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+
+ return rx_done;
+ }
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9eeddbd..6d9e10d 100644
--- a/drivers/net/ethernet/neterion/s2io.c
@@ -48824,7 +49086,7 @@ index fbf7dcd..ad71499 100644
};
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index e8c21f9..747b848 100644
+index e8c21f9..6ecd8cc 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -422,7 +422,7 @@ static void macvtap_setup(struct net_device *dev)
@@ -48854,6 +49116,14 @@ index e8c21f9..747b848 100644
.notifier_call = macvtap_device_event,
};
+@@ -1250,6 +1250,7 @@ static void macvtap_exit(void)
+ class_unregister(macvtap_class);
+ cdev_del(&macvtap_cdev);
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
++ idr_destroy(&minor_idr);
+ }
+ module_exit(macvtap_exit);
+
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index d2bb12b..d6c921e 100644
--- a/drivers/net/nlmon.c
@@ -49148,6 +49418,51 @@ index 841b608..198a8b7 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 0fa3b44..e913fc9 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1156,7 +1156,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ static const u32 rxprod_reg[2] = {
+ VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
+ };
+- u32 num_rxd = 0;
++ u32 num_pkts = 0;
+ bool skip_page_frags = false;
+ struct Vmxnet3_RxCompDesc *rcd;
+ struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
+@@ -1174,13 +1174,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ struct Vmxnet3_RxDesc *rxd;
+ u32 idx, ring_idx;
+ struct vmxnet3_cmd_ring *ring = NULL;
+- if (num_rxd >= quota) {
++ if (num_pkts >= quota) {
+ /* we may stop even before we see the EOP desc of
+ * the current pkt
+ */
+ break;
+ }
+- num_rxd++;
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
+ idx = rcd->rxdIdx;
+ ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+@@ -1312,6 +1311,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ napi_gro_receive(&rq->napi, skb);
+
+ ctx->skb = NULL;
++ num_pkts++;
+ }
+
+ rcd_done:
+@@ -1342,7 +1342,7 @@ rcd_done:
+ &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
+ }
+
+- return num_rxd;
++ return num_pkts;
+ }
+
+
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5988910..be561a2 100644
--- a/drivers/net/vxlan.c
@@ -52600,6 +52915,21 @@ index 40d8592..8e89146 100644
int block_sectors = 0;
long error_sector;
struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index a1d6986..f310982 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -1262,9 +1262,9 @@ static int st_open(struct inode *inode, struct file *filp)
+ spin_lock(&st_use_lock);
+ STp->in_use = 0;
+ spin_unlock(&st_use_lock);
+- scsi_tape_put(STp);
+ if (resumed)
+ scsi_autopm_put_device(STp->device);
++ scsi_tape_put(STp);
+ return retval;
+
+ }
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d6563ec..a1c5da2 100644
--- a/drivers/spi/spi.c
@@ -65676,10 +66006,20 @@ index c71e886..61d3d44b 100644
if (retval > 0)
retval = 0;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
-index bb7991c..481e21a 100644
+index bb7991c..9c2bc01 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
-@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
+ unlock_new_inode(inode);
+ return inode;
+ error:
+- unlock_new_inode(inode);
+- iput(inode);
++ iget_failed(inode);
+ return ERR_PTR(retval);
+
+ }
+@@ -1312,7 +1311,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
void
v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
@@ -65688,6 +66028,20 @@ index bb7991c..481e21a 100644
p9_debug(P9_DEBUG_VFS, " %s %s\n",
dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 59dc8e8..de8606c 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ unlock_new_inode(inode);
+ return inode;
+ error:
+- unlock_new_inode(inode);
+- iput(inode);
++ iget_failed(inode);
+ return ERR_PTR(retval);
+
+ }
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 370b24c..ff0be7b 100644
--- a/fs/Kconfig.binfmt
@@ -68121,7 +68475,7 @@ index a93f7e6..d58bcbe 100644
return 0;
while (nr) {
diff --git a/fs/dcache.c b/fs/dcache.c
-index aa24f7d..befb5fd 100644
+index aa24f7d..8f1bf8c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -250,7 +250,7 @@ static void __d_free(struct rcu_head *head)
@@ -68133,7 +68487,17 @@ index aa24f7d..befb5fd 100644
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
-@@ -596,7 +596,7 @@ repeat:
+@@ -587,6 +587,9 @@ repeat:
+ if (unlikely(d_unhashed(dentry)))
+ goto kill_it;
+
++ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
++ goto kill_it;
++
+ if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
+ if (dentry->d_op->d_delete(dentry))
+ goto kill_it;
+@@ -596,7 +599,7 @@ repeat:
dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry);
@@ -68142,7 +68506,7 @@ index aa24f7d..befb5fd 100644
spin_unlock(&dentry->d_lock);
return;
-@@ -651,7 +651,7 @@ int d_invalidate(struct dentry * dentry)
+@@ -651,7 +654,7 @@ int d_invalidate(struct dentry * dentry)
* We also need to leave mountpoints alone,
* directory or not.
*/
@@ -68151,7 +68515,7 @@ index aa24f7d..befb5fd 100644
if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
spin_unlock(&dentry->d_lock);
return -EBUSY;
-@@ -667,7 +667,7 @@ EXPORT_SYMBOL(d_invalidate);
+@@ -667,7 +670,7 @@ EXPORT_SYMBOL(d_invalidate);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
@@ -68160,7 +68524,7 @@ index aa24f7d..befb5fd 100644
}
static inline void __dget(struct dentry *dentry)
-@@ -708,8 +708,8 @@ repeat:
+@@ -708,8 +711,8 @@ repeat:
goto repeat;
}
rcu_read_unlock();
@@ -68171,7 +68535,7 @@ index aa24f7d..befb5fd 100644
spin_unlock(&ret->d_lock);
return ret;
}
-@@ -792,7 +792,7 @@ restart:
+@@ -792,7 +795,7 @@ restart:
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
@@ -68180,7 +68544,7 @@ index aa24f7d..befb5fd 100644
/*
* inform the fs via d_prune that this dentry
* is about to be unhashed and destroyed.
-@@ -884,7 +884,7 @@ static void shrink_dentry_list(struct list_head *list)
+@@ -884,7 +887,7 @@ static void shrink_dentry_list(struct list_head *list)
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free it.
*/
@@ -68189,7 +68553,7 @@ index aa24f7d..befb5fd 100644
spin_unlock(&dentry->d_lock);
continue;
}
-@@ -930,7 +930,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+@@ -930,7 +933,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
* counts, just remove them from the LRU. Otherwise give them
* another pass through the LRU.
*/
@@ -68198,7 +68562,7 @@ index aa24f7d..befb5fd 100644
d_lru_isolate(dentry);
spin_unlock(&dentry->d_lock);
return LRU_REMOVED;
-@@ -1269,7 +1269,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
+@@ -1269,7 +1272,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
* loop in shrink_dcache_parent() might not make any progress
* and loop forever.
*/
@@ -68207,7 +68571,7 @@ index aa24f7d..befb5fd 100644
dentry_lru_del(dentry);
} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
/*
-@@ -1323,11 +1323,11 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+@@ -1323,11 +1326,11 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
struct select_data *data = _data;
enum d_walk_ret ret = D_WALK_CONTINUE;
@@ -68221,7 +68585,7 @@ index aa24f7d..befb5fd 100644
goto out;
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
-@@ -1337,7 +1337,7 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+@@ -1337,7 +1340,7 @@ static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry->d_name.name,
@@ -68230,7 +68594,7 @@ index aa24f7d..befb5fd 100644
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
BUG();
-@@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1495,7 +1498,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
*/
dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
if (name->len > DNAME_INLINE_LEN-1) {
@@ -68239,7 +68603,7 @@ index aa24f7d..befb5fd 100644
if (!dname) {
kmem_cache_free(dentry_cache, dentry);
return NULL;
-@@ -1513,7 +1513,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1513,7 +1516,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
smp_wmb();
dentry->d_name.name = dname;
@@ -68248,7 +68612,7 @@ index aa24f7d..befb5fd 100644
dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
-@@ -1522,6 +1522,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1522,6 +1525,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dentry->d_sb = sb;
dentry->d_op = NULL;
dentry->d_fsdata = NULL;
@@ -68258,7 +68622,7 @@ index aa24f7d..befb5fd 100644
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
-@@ -2276,7 +2279,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
+@@ -2276,7 +2282,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
goto next;
}
@@ -68267,7 +68631,7 @@ index aa24f7d..befb5fd 100644
found = dentry;
spin_unlock(&dentry->d_lock);
break;
-@@ -2375,7 +2378,7 @@ again:
+@@ -2375,7 +2381,7 @@ again:
spin_lock(&dentry->d_lock);
inode = dentry->d_inode;
isdir = S_ISDIR(inode->i_mode);
@@ -68276,7 +68640,7 @@ index aa24f7d..befb5fd 100644
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
-@@ -3308,7 +3311,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
+@@ -3308,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
@@ -68285,7 +68649,7 @@ index aa24f7d..befb5fd 100644
}
}
return D_WALK_CONTINUE;
-@@ -3424,7 +3427,8 @@ void __init vfs_caches_init(unsigned long mempages)
+@@ -3424,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
@@ -82401,7 +82765,7 @@ index 0000000..25f54ef
+};
diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
new file mode 100644
-index 0000000..fd26052
+index 0000000..7dc01b3
--- /dev/null
+++ b/grsecurity/gracl_policy.c
@@ -0,0 +1,1781 @@
@@ -82860,7 +83224,7 @@ index 0000000..fd26052
+ get_fs_root(reaper->fs, &gr_real_root);
+
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
++ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", gr_get_dev_from_dentry(gr_real_root.dentry), gr_get_ino_from_dentry(gr_real_root.dentry));
+#endif
+
+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
@@ -85841,7 +86205,7 @@ index 0000000..8ca18bf
+}
diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
new file mode 100644
-index 0000000..4ed9e7d
+index 0000000..a364c58
--- /dev/null
+++ b/grsecurity/grsec_init.c
@@ -0,0 +1,290 @@
@@ -85854,61 +86218,61 @@ index 0000000..4ed9e7d
+#include <linux/percpu.h>
+#include <linux/module.h>
+
-+int grsec_enable_ptrace_readexec;
-+int grsec_enable_setxid;
-+int grsec_enable_symlinkown;
-+kgid_t grsec_symlinkown_gid;
-+int grsec_enable_brute;
-+int grsec_enable_link;
-+int grsec_enable_dmesg;
-+int grsec_enable_harden_ptrace;
-+int grsec_enable_harden_ipc;
-+int grsec_enable_fifo;
-+int grsec_enable_execlog;
-+int grsec_enable_signal;
-+int grsec_enable_forkfail;
-+int grsec_enable_audit_ptrace;
-+int grsec_enable_time;
-+int grsec_enable_group;
-+kgid_t grsec_audit_gid;
-+int grsec_enable_chdir;
-+int grsec_enable_mount;
-+int grsec_enable_rofs;
-+int grsec_deny_new_usb;
-+int grsec_enable_chroot_findtask;
-+int grsec_enable_chroot_mount;
-+int grsec_enable_chroot_shmat;
-+int grsec_enable_chroot_fchdir;
-+int grsec_enable_chroot_double;
-+int grsec_enable_chroot_pivot;
-+int grsec_enable_chroot_chdir;
-+int grsec_enable_chroot_chmod;
-+int grsec_enable_chroot_mknod;
-+int grsec_enable_chroot_nice;
-+int grsec_enable_chroot_execlog;
-+int grsec_enable_chroot_caps;
-+int grsec_enable_chroot_rename;
-+int grsec_enable_chroot_sysctl;
-+int grsec_enable_chroot_unix;
-+int grsec_enable_tpe;
-+kgid_t grsec_tpe_gid;
-+int grsec_enable_blackhole;
++int grsec_enable_ptrace_readexec __read_only;
++int grsec_enable_setxid __read_only;
++int grsec_enable_symlinkown __read_only;
++kgid_t grsec_symlinkown_gid __read_only;
++int grsec_enable_brute __read_only;
++int grsec_enable_link __read_only;
++int grsec_enable_dmesg __read_only;
++int grsec_enable_harden_ptrace __read_only;
++int grsec_enable_harden_ipc __read_only;
++int grsec_enable_fifo __read_only;
++int grsec_enable_execlog __read_only;
++int grsec_enable_signal __read_only;
++int grsec_enable_forkfail __read_only;
++int grsec_enable_audit_ptrace __read_only;
++int grsec_enable_time __read_only;
++int grsec_enable_group __read_only;
++kgid_t grsec_audit_gid __read_only;
++int grsec_enable_chdir __read_only;
++int grsec_enable_mount __read_only;
++int grsec_enable_rofs __read_only;
++int grsec_deny_new_usb __read_only;
++int grsec_enable_chroot_findtask __read_only;
++int grsec_enable_chroot_mount __read_only;
++int grsec_enable_chroot_shmat __read_only;
++int grsec_enable_chroot_fchdir __read_only;
++int grsec_enable_chroot_double __read_only;
++int grsec_enable_chroot_pivot __read_only;
++int grsec_enable_chroot_chdir __read_only;
++int grsec_enable_chroot_chmod __read_only;
++int grsec_enable_chroot_mknod __read_only;
++int grsec_enable_chroot_nice __read_only;
++int grsec_enable_chroot_execlog __read_only;
++int grsec_enable_chroot_caps __read_only;
++int grsec_enable_chroot_rename __read_only;
++int grsec_enable_chroot_sysctl __read_only;
++int grsec_enable_chroot_unix __read_only;
++int grsec_enable_tpe __read_only;
++kgid_t grsec_tpe_gid __read_only;
++int grsec_enable_blackhole __read_only;
+#ifdef CONFIG_IPV6_MODULE
+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
+#endif
-+int grsec_lastack_retries;
-+int grsec_enable_tpe_all;
-+int grsec_enable_tpe_invert;
-+int grsec_enable_socket_all;
-+kgid_t grsec_socket_all_gid;
-+int grsec_enable_socket_client;
-+kgid_t grsec_socket_client_gid;
-+int grsec_enable_socket_server;
-+kgid_t grsec_socket_server_gid;
-+int grsec_resource_logging;
-+int grsec_disable_privio;
-+int grsec_enable_log_rwxmaps;
-+int grsec_lock;
++int grsec_lastack_retries __read_only;
++int grsec_enable_tpe_all __read_only;
++int grsec_enable_tpe_invert __read_only;
++int grsec_enable_socket_all __read_only;
++kgid_t grsec_socket_all_gid __read_only;
++int grsec_enable_socket_client __read_only;
++kgid_t grsec_socket_client_gid __read_only;
++int grsec_enable_socket_server __read_only;
++kgid_t grsec_socket_server_gid __read_only;
++int grsec_resource_logging __read_only;
++int grsec_disable_privio __read_only;
++int grsec_enable_log_rwxmaps __read_only;
++int grsec_lock __read_only;
+
+DEFINE_SPINLOCK(grsec_alert_lock);
+unsigned long grsec_alert_wtime = 0;
@@ -87332,7 +87696,7 @@ index 0000000..a523bd2
+}
diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
new file mode 100644
-index 0000000..cce889e
+index 0000000..aaec43c
--- /dev/null
+++ b/grsecurity/grsec_sysctl.c
@@ -0,0 +1,488 @@
@@ -87371,7 +87735,7 @@ index 0000000..cce889e
+ .data = &grsec_disable_privio,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#endif
@@ -87381,7 +87745,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_link,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
@@ -87390,14 +87754,14 @@ index 0000000..cce889e
+ .data = &grsec_enable_symlinkown,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "symlinkown_gid",
+ .data = &grsec_symlinkown_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_BRUTE
@@ -87406,7 +87770,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_brute,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_FIFO
@@ -87415,7 +87779,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_fifo,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
@@ -87424,7 +87788,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_ptrace_readexec,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
@@ -87433,7 +87797,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_setxid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
@@ -87442,14 +87806,14 @@ index 0000000..cce889e
+ .data = &grsec_enable_blackhole,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "lastack_retries",
+ .data = &grsec_lastack_retries,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECLOG
@@ -87458,7 +87822,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_execlog,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
@@ -87467,7 +87831,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_log_rwxmaps,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SIGNAL
@@ -87476,7 +87840,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_signal,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
@@ -87485,7 +87849,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_forkfail,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TIME
@@ -87494,7 +87858,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_time,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
@@ -87503,7 +87867,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_shmat,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
@@ -87512,7 +87876,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_unix,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
@@ -87521,7 +87885,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_mount,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
@@ -87530,7 +87894,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_fchdir,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
@@ -87539,7 +87903,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_double,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
@@ -87548,7 +87912,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_pivot,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
@@ -87557,7 +87921,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_chdir,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
@@ -87566,7 +87930,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_chmod,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
@@ -87575,7 +87939,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_mknod,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
@@ -87584,7 +87948,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_nice,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
@@ -87593,7 +87957,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_execlog,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
@@ -87602,7 +87966,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_caps,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
@@ -87611,7 +87975,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_rename,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
@@ -87620,7 +87984,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_sysctl,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE
@@ -87629,14 +87993,14 @@ index 0000000..cce889e
+ .data = &grsec_enable_tpe,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "tpe_gid",
+ .data = &grsec_tpe_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
@@ -87645,7 +88009,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_tpe_invert,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
@@ -87654,7 +88018,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_tpe_all,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
@@ -87663,14 +88027,14 @@ index 0000000..cce889e
+ .data = &grsec_enable_socket_all,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "socket_all_gid",
+ .data = &grsec_socket_all_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
@@ -87679,14 +88043,14 @@ index 0000000..cce889e
+ .data = &grsec_enable_socket_client,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "socket_client_gid",
+ .data = &grsec_socket_client_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
@@ -87695,14 +88059,14 @@ index 0000000..cce889e
+ .data = &grsec_enable_socket_server,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "socket_server_gid",
+ .data = &grsec_socket_server_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
@@ -87711,14 +88075,14 @@ index 0000000..cce889e
+ .data = &grsec_enable_group,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "audit_gid",
+ .data = &grsec_audit_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
@@ -87727,7 +88091,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chdir,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
@@ -87736,7 +88100,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_mount,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_DMESG
@@ -87745,7 +88109,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_dmesg,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
@@ -87754,7 +88118,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_chroot_findtask,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_RESLOG
@@ -87763,7 +88127,7 @@ index 0000000..cce889e
+ .data = &grsec_resource_logging,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
@@ -87772,7 +88136,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_audit_ptrace,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
@@ -87781,7 +88145,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_harden_ptrace,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
@@ -87790,7 +88154,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_harden_ipc,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+ {
@@ -87798,7 +88162,7 @@ index 0000000..cce889e
+ .data = &grsec_lock,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_ROFS
@@ -87807,7 +88171,7 @@ index 0000000..cce889e
+ .data = &grsec_enable_rofs,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_minmax,
++ .proc_handler = &proc_dointvec_minmax_secure,
+ .extra1 = &one,
+ .extra2 = &one,
+ },
@@ -87818,7 +88182,7 @@ index 0000000..cce889e
+ .data = &grsec_deny_new_usb,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+ { }
@@ -89080,8 +89444,31 @@ index 939533d..cf0a57c 100644
/**
* struct clk_init_data - holds init data that's common to all clocks and is
+diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
+index 94bad77..a39e810 100644
+--- a/include/linux/clkdev.h
++++ b/include/linux/clkdev.h
+@@ -32,7 +32,7 @@ struct clk_lookup {
+ }
+
+ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
+- const char *dev_fmt, ...);
++ const char *dev_fmt, ...) __printf(3, 4);
+
+ void clkdev_add(struct clk_lookup *cl);
+ void clkdev_drop(struct clk_lookup *cl);
+@@ -40,7 +40,8 @@ void clkdev_drop(struct clk_lookup *cl);
+ void clkdev_add_table(struct clk_lookup *, size_t);
+ int clk_add_alias(const char *, const char *, char *, struct device *);
+
+-int clk_register_clkdev(struct clk *, const char *, const char *, ...);
++int clk_register_clkdev(struct clk *, const char *, const char *, ...)
++ __printf(3, 4);
+ int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
+
+ #ifdef CONFIG_COMMON_CLK
diff --git a/include/linux/compat.h b/include/linux/compat.h
-index 3f448c6..8dd869d 100644
+index 3f448c6..4d53187 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
@@ -89102,6 +89489,15 @@ index 3f448c6..8dd869d 100644
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+@@ -405,7 +405,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+
+ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
+
+-extern int compat_printk(const char *fmt, ...);
++extern __printf(1, 2) int compat_printk(const char *fmt, ...);
+ extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
+ extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
+
@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
@@ -89151,10 +89547,10 @@ index 2507fd2..55203f8 100644
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
-index cdd1cc2..9c1ee22 100644
+index cdd1cc2..d062745 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
-@@ -28,6 +28,31 @@
+@@ -28,6 +28,30 @@
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
@@ -89174,7 +89570,6 @@ index cdd1cc2..9c1ee22 100644
+#endif
+
+#ifdef SIZE_OVERFLOW_PLUGIN
-+#error not yet
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
+#endif
@@ -89186,7 +89581,7 @@ index cdd1cc2..9c1ee22 100644
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
-@@ -53,7 +78,6 @@
+@@ -53,7 +77,6 @@
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
*
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
@@ -89368,19 +89763,20 @@ index 5d5aaae..0ea9b84 100644
extern bool completion_done(struct completion *x);
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
-index 34025df..2a6ee32 100644
+index 34025df..9c263df 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
-@@ -64,7 +64,7 @@ struct config_item {
+@@ -64,7 +64,8 @@ struct config_item {
struct dentry *ci_dentry;
};
-extern int config_item_set_name(struct config_item *, const char *, ...);
-+extern __printf(2, 3) int config_item_set_name(struct config_item *, const char *, ...);
++extern __printf(2, 3)
++int config_item_set_name(struct config_item *, const char *, ...);
static inline char *config_item_name(struct config_item * item)
{
-@@ -125,7 +125,7 @@ struct configfs_attribute {
+@@ -125,7 +126,7 @@ struct configfs_attribute {
const char *ca_name;
struct module *ca_owner;
umode_t ca_mode;
@@ -89568,7 +89964,7 @@ index 653589e..4ef254a 100644
return c | 0x20;
}
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index 0f0eb1c..776283e 100644
+index 0f0eb1c..3c17a3d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -123,6 +123,9 @@ struct dentry {
@@ -89590,6 +89986,16 @@ index 0f0eb1c..776283e 100644
/*
* dentry->d_lock spinlock nesting subclasses:
+@@ -328,7 +331,8 @@ extern int d_validate(struct dentry *, struct dentry *);
+ /*
+ * helper function for dentry_operations.d_dname() members
+ */
+-extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
++extern __printf(4, 5)
++char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+ extern char *simple_dname(struct dentry *, char *, int);
+
+ extern char *__d_path(const struct path *, const struct path *, char *, int);
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 7925bf0..d5143d2 100644
--- a/include/linux/decompress/mm.h
@@ -89617,7 +90023,7 @@ index d48dc00..211ee54 100644
/**
* struct devfreq - Device devfreq structure
diff --git a/include/linux/device.h b/include/linux/device.h
-index 952b010..d5b7691 100644
+index 952b010..2f65744 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -310,7 +310,7 @@ struct subsys_interface {
@@ -89652,6 +90058,23 @@ index 952b010..d5b7691 100644
ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
char *buf);
+@@ -956,12 +957,10 @@ extern int __must_check device_reprobe(struct device *dev);
+ /*
+ * Easy functions for dynamically creating devices on the fly
+ */
+-extern struct device *device_create_vargs(struct class *cls,
+- struct device *parent,
+- dev_t devt,
+- void *drvdata,
+- const char *fmt,
+- va_list vargs);
++extern __printf(5, 0)
++struct device *device_create_vargs(struct class *cls, struct device *parent,
++ dev_t devt, void *drvdata,
++ const char *fmt, va_list vargs);
+ extern __printf(5, 6)
+ struct device *device_create(struct class *cls, struct device *parent,
+ dev_t devt, void *drvdata,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index fd4aee2..1f28db9 100644
--- a/include/linux/dma-mapping.h
@@ -91739,6 +92162,42 @@ index 6883e19..e854fcb 100644
/* This macro allows us to keep printk typechecking */
static __printf(1, 2)
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 196d1ea..86a6927 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -397,7 +397,8 @@ extern __printf(3, 0)
+ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+ extern __printf(2, 3)
+ char *kasprintf(gfp_t gfp, const char *fmt, ...);
+-extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
++extern __printf(2, 0)
++char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+
+ extern __scanf(2, 3)
+ int sscanf(const char *, const char *, ...);
+@@ -670,10 +671,10 @@ do { \
+ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
+ } while (0)
+
+-extern int
++extern __printf(2, 0) int
+ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+-extern int
++extern __printf(2, 0) int
+ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
+@@ -693,7 +694,7 @@ int trace_printk(const char *fmt, ...)
+ {
+ return 0;
+ }
+-static inline int
++static __printf(1, 0) inline int
+ ftrace_vprintk(const char *fmt, va_list ap)
+ {
+ return 0;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index a74c3a8..28d3f21 100644
--- a/include/linux/key-type.h
@@ -91807,10 +92266,22 @@ index 0555cc6..40116ce 100644
char **envp;
int wait;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
-index 926afb6..58dd6e5 100644
+index 926afb6..e4d3dd9 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
-@@ -116,7 +116,7 @@ struct kobj_type {
+@@ -78,8 +78,9 @@ struct kobject {
+
+ extern __printf(2, 3)
+ int kobject_set_name(struct kobject *kobj, const char *name, ...);
+-extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+- va_list vargs);
++extern __printf(2, 0)
++int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
++ va_list vargs);
+
+ static inline const char *kobject_name(const struct kobject *kobj)
+ {
+@@ -116,7 +117,7 @@ struct kobj_type {
struct attribute **default_attrs;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
@@ -91819,7 +92290,7 @@ index 926afb6..58dd6e5 100644
struct kobj_uevent_env {
char *envp[UEVENT_NUM_ENVP];
-@@ -139,6 +139,7 @@ struct kobj_attribute {
+@@ -139,6 +140,7 @@ struct kobj_attribute {
ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count);
};
@@ -91827,7 +92298,7 @@ index 926afb6..58dd6e5 100644
extern const struct sysfs_ops kobj_sysfs_ops;
-@@ -166,7 +167,7 @@ struct kset {
+@@ -166,7 +168,7 @@ struct kset {
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
@@ -92388,7 +92859,7 @@ index 87079fc..7724b1f 100644
/*
* Standard errno values are used for errors, but some have specific
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
-index c5d5278..f0b68c8 100644
+index c5d5278..85cd5ce 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
@@ -92409,6 +92880,14 @@ index c5d5278..f0b68c8 100644
{
}
+@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
+ extern void disable_mmiotrace(void);
+ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
+ extern void mmio_trace_mapping(struct mmiotrace_map *map);
+-extern int mmio_trace_printk(const char *fmt, va_list args);
++extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
+
+ #endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ac819bf..838afec 100644
--- a/include/linux/mmzone.h
@@ -93159,7 +93638,7 @@ index 1841b58..fbeebf8 100644
#define preempt_set_need_resched() \
do { \
diff --git a/include/linux/printk.h b/include/linux/printk.h
-index cbf094f..86007b7 100644
+index cbf094f..630d761 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -114,6 +114,8 @@ static inline __printf(1, 2) __cold
@@ -93171,7 +93650,7 @@ index cbf094f..86007b7 100644
#ifdef CONFIG_PRINTK
asmlinkage __printf(5, 0)
int vprintk_emit(int facility, int level,
-@@ -148,7 +150,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+@@ -148,13 +150,12 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
@@ -93179,6 +93658,22 @@ index cbf094f..86007b7 100644
extern void wake_up_klogd(void);
+ void log_buf_kexec_setup(void);
+ void __init setup_log_buf(int early);
+-void dump_stack_set_arch_desc(const char *fmt, ...);
++__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
+ void dump_stack_print_info(const char *log_lvl);
+ void show_regs_print_info(const char *log_lvl);
+ #else
+@@ -195,7 +196,7 @@ static inline void setup_log_buf(int early)
+ {
+ }
+
+-static inline void dump_stack_set_arch_desc(const char *fmt, ...)
++static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
+ {
+ }
+
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 608e60a..bbcb1a0 100644
--- a/include/linux/proc_fs.h
@@ -94377,10 +94872,10 @@ index 27b3b0b..e093dd9 100644
extern void register_syscore_ops(struct syscore_ops *ops);
extern void unregister_syscore_ops(struct syscore_ops *ops);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
-index 14a8ff2..fa95f3a 100644
+index 14a8ff2..65dc1e2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
-@@ -34,13 +34,13 @@ struct ctl_table_root;
+@@ -34,17 +34,21 @@ struct ctl_table_root;
struct ctl_table_header;
struct ctl_dir;
@@ -94395,8 +94890,16 @@ index 14a8ff2..fa95f3a 100644
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
++extern int proc_dointvec_secure(struct ctl_table *, int,
++ void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
-@@ -115,7 +115,9 @@ struct ctl_table
+ void __user *, size_t *, loff_t *);
++extern int proc_dointvec_minmax_secure(struct ctl_table *, int,
++ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_jiffies(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
+@@ -115,7 +119,9 @@ struct ctl_table
struct ctl_table_poll *poll;
void *extra1;
void *extra2;
@@ -97574,10 +98077,10 @@ index c18b1f1..b9a0132 100644
return -ENOMEM;
diff --git a/kernel/cred.c b/kernel/cred.c
-index e0573a4..20fb164 100644
+index e0573a4..3907beb 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
-@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
+@@ -164,6 +164,15 @@ void exit_creds(struct task_struct *tsk)
validate_creds(cred);
alter_cred_subscribers(cred, -1);
put_cred(cred);
@@ -97587,14 +98090,13 @@ index e0573a4..20fb164 100644
+ if (cred != NULL) {
+ tsk->delayed_cred = NULL;
+ validate_creds(cred);
-+ alter_cred_subscribers(cred, -1);
+ put_cred(cred);
+ }
+#endif
}
/**
-@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
+@@ -411,7 +420,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
* Always returns 0 thus allowing this function to be tail-called at the end
* of, say, sys_setgid().
*/
@@ -97603,7 +98105,7 @@ index e0573a4..20fb164 100644
{
struct task_struct *task = current;
const struct cred *old = task->real_cred;
-@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
+@@ -430,6 +439,8 @@ int commit_creds(struct cred *new)
get_cred(new); /* we will require a ref for the subj creds too */
@@ -97612,7 +98114,7 @@ index e0573a4..20fb164 100644
/* dumpability changes */
if (!uid_eq(old->euid, new->euid) ||
!gid_eq(old->egid, new->egid) ||
-@@ -479,6 +491,108 @@ int commit_creds(struct cred *new)
+@@ -479,6 +490,108 @@ int commit_creds(struct cred *new)
put_cred(old);
return 0;
}
@@ -97808,7 +98310,7 @@ index 449518e..2658dd6 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 60146fe..2e89117 100644
+index 60146fe..7037710 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -159,8 +159,15 @@ static struct srcu_struct pmus_srcu;
@@ -97819,11 +98321,11 @@ index 60146fe..2e89117 100644
*/
-int sysctl_perf_event_paranoid __read_mostly = 1;
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
-+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
++int sysctl_perf_event_legitimately_concerned __read_only = 3;
+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
-+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
++int sysctl_perf_event_legitimately_concerned __read_only = 2;
+#else
-+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
++int sysctl_perf_event_legitimately_concerned __read_only = 1;
+#endif
/* Minimum for 512 kiB + 1 user control page */
@@ -99388,7 +99890,7 @@ index 1d96dd0..994ff19 100644
default:
diff --git a/kernel/module.c b/kernel/module.c
-index 1d679a6..acc7443 100644
+index 1d679a6..b67e85e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -61,6 +61,7 @@
@@ -99399,6 +99901,15 @@ index 1d679a6..acc7443 100644
#include <uapi/linux/module.h>
#include "module-internal.h"
+@@ -147,7 +148,7 @@ module_param(sig_enforce, bool_enable_only, 0644);
+ #endif /* CONFIG_MODULE_SIG */
+
+ /* Block module loading/unloading? */
+-int modules_disabled = 0;
++int modules_disabled __read_only = 0;
+ core_param(nomodule, modules_disabled, bint, 0);
+
+ /* Waiting for a module to finish initializing? */
@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
/* Bounds of module allocation, for speeding __module_address.
@@ -100607,9 +101118,21 @@ index f1fe7ec..7d4e641 100644
if (pm_wakeup_pending()) {
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index a755ad7..bf7e534 100644
+index a755ad7..ba98f34 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
+@@ -359,9 +359,9 @@ static void log_store(int facility, int level,
+ }
+
+ #ifdef CONFIG_SECURITY_DMESG_RESTRICT
+-int dmesg_restrict = 1;
++int dmesg_restrict __read_only = 1;
+ #else
+-int dmesg_restrict;
++int dmesg_restrict __read_only;
+ #endif
+
+ static int syslog_action_restricted(int type)
@@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
if (from_file && type != SYSLOG_ACTION_OPEN)
return 0;
@@ -102248,7 +102771,7 @@ index c0a58be..95e292b 100644
if (!retval) {
if (old_rlim)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index c1b26e1..bc7b50d 100644
+index c1b26e1..947cae6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -94,7 +94,6 @@
@@ -102259,7 +102782,11 @@ index c1b26e1..bc7b50d 100644
/* External variables not in a header file. */
extern int max_threads;
extern int suid_dumpable;
-@@ -118,19 +117,18 @@ extern int blk_iopoll_enabled;
+@@ -115,22 +114,22 @@ extern int sysctl_nr_trim_pages;
+ #ifdef CONFIG_BLOCK
+ extern int blk_iopoll_enabled;
+ #endif
++extern int sysctl_modify_ldt;
/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
@@ -102288,18 +102815,19 @@ index c1b26e1..bc7b50d 100644
#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
-@@ -181,10 +179,8 @@ static int proc_taint(struct ctl_table *table, int write,
+@@ -181,10 +180,8 @@ static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
-#ifdef CONFIG_PRINTK
- static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-@@ -215,6 +211,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
+@@ -215,6 +212,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
#endif
@@ -102308,7 +102836,7 @@ index c1b26e1..bc7b50d 100644
static struct ctl_table kern_table[];
static struct ctl_table vm_table[];
static struct ctl_table fs_table[];
-@@ -229,6 +227,20 @@ extern struct ctl_table epoll_table[];
+@@ -229,6 +228,20 @@ extern struct ctl_table epoll_table[];
int sysctl_legacy_va_layout;
#endif
@@ -102329,7 +102857,7 @@ index c1b26e1..bc7b50d 100644
/* The default sysctl tables: */
static struct ctl_table sysctl_base_table[] = {
-@@ -277,6 +289,22 @@ static int max_extfrag_threshold = 1000;
+@@ -277,6 +290,22 @@ static int max_extfrag_threshold = 1000;
#endif
static struct ctl_table kern_table[] = {
@@ -102352,7 +102880,7 @@ index c1b26e1..bc7b50d 100644
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
-@@ -639,7 +667,7 @@ static struct ctl_table kern_table[] = {
+@@ -639,7 +668,7 @@ static struct ctl_table kern_table[] = {
.data = &modprobe_path,
.maxlen = KMOD_PATH_LEN,
.mode = 0644,
@@ -102361,7 +102889,21 @@ index c1b26e1..bc7b50d 100644
},
{
.procname = "modules_disabled",
-@@ -806,16 +834,20 @@ static struct ctl_table kern_table[] = {
+@@ -647,7 +676,7 @@ static struct ctl_table kern_table[] = {
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ /* only handle a transition from default "0" to "1" */
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_dointvec_minmax_secure,
+ .extra1 = &one,
+ .extra2 = &one,
+ },
+@@ -802,20 +831,24 @@ static struct ctl_table kern_table[] = {
+ .data = &dmesg_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax_sysadmin,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
.extra1 = &zero,
.extra2 = &one,
},
@@ -102371,7 +102913,8 @@ index c1b26e1..bc7b50d 100644
.data = &kptr_restrict,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax_sysadmin,
+- .proc_handler = proc_dointvec_minmax_sysadmin,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+ .extra1 = &two,
+#else
@@ -102383,7 +102926,23 @@ index c1b26e1..bc7b50d 100644
{
.procname = "ngroups_max",
.data = &ngroups_max,
-@@ -1060,10 +1092,17 @@ static struct ctl_table kern_table[] = {
+@@ -929,6 +962,15 @@ static struct ctl_table kern_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
++ {
++ .procname = "modify_ldt",
++ .data = &sysctl_modify_ldt,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
+ #endif
+ #if defined(CONFIG_MMU)
+ {
+@@ -1060,10 +1102,17 @@ static struct ctl_table kern_table[] = {
*/
{
.procname = "perf_event_paranoid",
@@ -102394,7 +102953,7 @@ index c1b26e1..bc7b50d 100644
.mode = 0644,
- .proc_handler = proc_dointvec,
+ /* go ahead, be a hero */
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
+ .extra1 = &neg_one,
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+ .extra2 = &three,
@@ -102404,7 +102963,7 @@ index c1b26e1..bc7b50d 100644
},
{
.procname = "perf_event_mlock_kb",
-@@ -1334,6 +1373,13 @@ static struct ctl_table vm_table[] = {
+@@ -1334,6 +1383,13 @@ static struct ctl_table vm_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
@@ -102418,7 +102977,7 @@ index c1b26e1..bc7b50d 100644
#else
{
.procname = "nr_trim_pages",
-@@ -1798,6 +1844,16 @@ int proc_dostring(struct ctl_table *table, int write,
+@@ -1798,6 +1854,16 @@ int proc_dostring(struct ctl_table *table, int write,
buffer, lenp, ppos);
}
@@ -102435,7 +102994,7 @@ index c1b26e1..bc7b50d 100644
static size_t proc_skip_spaces(char **buf)
{
size_t ret;
-@@ -1903,6 +1959,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
+@@ -1903,6 +1969,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
len = strlen(tmp);
if (len > *size)
len = *size;
@@ -102444,7 +103003,52 @@ index c1b26e1..bc7b50d 100644
if (copy_to_user(*buf, tmp, len))
return -EFAULT;
*size -= len;
-@@ -2067,7 +2125,7 @@ int proc_dointvec(struct ctl_table *table, int write,
+@@ -2060,6 +2128,44 @@ int proc_dointvec(struct ctl_table *table, int write,
+ NULL,NULL);
+ }
+
++static int do_proc_dointvec_conv_secure(bool *negp, unsigned long *lvalp,
++ int *valp,
++ int write, void *data)
++{
++ if (write) {
++ if (*negp) {
++ if (*lvalp > (unsigned long) INT_MAX + 1)
++ return -EINVAL;
++ pax_open_kernel();
++ *valp = -*lvalp;
++ pax_close_kernel();
++ } else {
++ if (*lvalp > (unsigned long) INT_MAX)
++ return -EINVAL;
++ pax_open_kernel();
++ *valp = *lvalp;
++ pax_close_kernel();
++ }
++ } else {
++ int val = *valp;
++ if (val < 0) {
++ *negp = true;
++ *lvalp = (unsigned long)-val;
++ } else {
++ *negp = false;
++ *lvalp = (unsigned long)val;
++ }
++ }
++ return 0;
++}
++
++int proc_dointvec_secure(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ return do_proc_dointvec(table,write,buffer,lenp,ppos,
++ do_proc_dointvec_conv_secure,NULL);
++}
++
+ /*
+ * Taint values can only be increased
+ * This means we can safely use a temporary.
+@@ -2067,7 +2173,7 @@ int proc_dointvec(struct ctl_table *table, int write,
static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -102453,23 +103057,77 @@ index c1b26e1..bc7b50d 100644
unsigned long tmptaint = get_taint();
int err;
-@@ -2095,7 +2153,6 @@ static int proc_taint(struct ctl_table *table, int write,
+@@ -2095,16 +2201,14 @@ static int proc_taint(struct ctl_table *table, int write,
return err;
}
-#ifdef CONFIG_PRINTK
- static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
-@@ -2104,7 +2161,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
- return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+- return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++ return proc_dointvec_minmax_secure(table, write, buffer, lenp, ppos);
}
-#endif
struct do_proc_dointvec_minmax_conv_param {
int *min;
-@@ -2651,6 +2707,12 @@ int proc_dostring(struct ctl_table *table, int write,
+@@ -2135,6 +2239,32 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
+ return 0;
+ }
+
++static int do_proc_dointvec_minmax_conv_secure(bool *negp, unsigned long *lvalp,
++ int *valp,
++ int write, void *data)
++{
++ struct do_proc_dointvec_minmax_conv_param *param = data;
++ if (write) {
++ int val = *negp ? -*lvalp : *lvalp;
++ if ((param->min && *param->min > val) ||
++ (param->max && *param->max < val))
++ return -EINVAL;
++ pax_open_kernel();
++ *valp = val;
++ pax_close_kernel();
++ } else {
++ int val = *valp;
++ if (val < 0) {
++ *negp = true;
++ *lvalp = (unsigned long)-val;
++ } else {
++ *negp = false;
++ *lvalp = (unsigned long)val;
++ }
++ }
++ return 0;
++}
++
+ /**
+ * proc_dointvec_minmax - read a vector of integers with min/max values
+ * @table: the sysctl table
+@@ -2162,6 +2292,17 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
+ do_proc_dointvec_minmax_conv, &param);
+ }
+
++int proc_dointvec_minmax_secure(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct do_proc_dointvec_minmax_conv_param param = {
++ .min = (int *) table->extra1,
++ .max = (int *) table->extra2,
++ };
++ return do_proc_dointvec(table, write, buffer, lenp, ppos,
++ do_proc_dointvec_minmax_conv_secure, &param);
++}
++
+ static void validate_coredump_safety(void)
+ {
+ #ifdef CONFIG_COREDUMP
+@@ -2651,6 +2792,12 @@ int proc_dostring(struct ctl_table *table, int write,
return -ENOSYS;
}
@@ -102482,7 +103140,7 @@ index c1b26e1..bc7b50d 100644
int proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
-@@ -2707,5 +2769,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
+@@ -2707,5 +2854,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
EXPORT_SYMBOL(proc_dostring);
@@ -103832,10 +104490,22 @@ index bd2bea9..6b3c95e 100644
return false;
diff --git a/lib/kobject.c b/lib/kobject.c
-index cb14aea..8c53cdb 100644
+index cb14aea..7ae9777 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
-@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+@@ -340,8 +340,9 @@ error:
+ }
+ EXPORT_SYMBOL(kobject_init);
+
+-static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
+- const char *fmt, va_list vargs)
++static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
++ struct kobject *parent,
++ const char *fmt, va_list vargs)
+ {
+ int retval;
+
+@@ -931,9 +932,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
static DEFINE_SPINLOCK(kobj_ns_type_lock);
@@ -104334,7 +105004,7 @@ index 4f5b1dd..7cab418 100644
+}
+EXPORT_SYMBOL(copy_to_user_overflow);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
-index 185b6d3..823c48c 100644
+index 185b6d3..c82c105 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -16,6 +16,9 @@
@@ -104351,10 +105021,11 @@ index 185b6d3..823c48c 100644
return number(buf, end, num, spec);
}
+-int kptr_restrict __read_mostly;
+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+int kptr_restrict __read_mostly = 2;
++int kptr_restrict __read_only = 2;
+#else
- int kptr_restrict __read_mostly;
++int kptr_restrict __read_only;
+#endif
/*
@@ -109843,6 +110514,18 @@ index 0447d5d..3cf4728 100644
__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}
+diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
+index 1997538..3b78e84 100644
+--- a/net/ax25/ax25_subr.c
++++ b/net/ax25/ax25_subr.c
+@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
+ {
+ ax25_clear_queues(ax25);
+
++ ax25_stop_heartbeat(ax25);
+ ax25_stop_t1timer(ax25);
+ ax25_stop_t2timer(ax25);
+ ax25_stop_t3timer(ax25);
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index 919a5ce..cc6b444 100644
--- a/net/ax25/sysctl_net_ax25.c
@@ -110513,10 +111196,87 @@ index d125290..e86e034 100644
a0 = a[0];
a1 = a[1];
diff --git a/net/core/datagram.c b/net/core/datagram.c
-index a16ed7b..eb44d17 100644
+index a16ed7b..689402b 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
-@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+@@ -130,6 +130,35 @@ out_noerr:
+ goto out;
+ }
+
++static int skb_set_peeked(struct sk_buff *skb)
++{
++ struct sk_buff *nskb;
++
++ if (skb->peeked)
++ return 0;
++
++ /* We have to unshare an skb before modifying it. */
++ if (!skb_shared(skb))
++ goto done;
++
++ nskb = skb_clone(skb, GFP_ATOMIC);
++ if (!nskb)
++ return -ENOMEM;
++
++ skb->prev->next = nskb;
++ skb->next->prev = nskb;
++ nskb->prev = skb->prev;
++ nskb->next = skb->next;
++
++ consume_skb(skb);
++ skb = nskb;
++
++done:
++ skb->peeked = 1;
++
++ return 0;
++}
++
+ /**
+ * __skb_recv_datagram - Receive a datagram skbuff
+ * @sk: socket
+@@ -164,7 +193,9 @@ out_noerr:
+ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ int *peeked, int *off, int *err)
+ {
++ struct sk_buff_head *queue = &sk->sk_receive_queue;
+ struct sk_buff *skb, *last;
++ unsigned long cpu_flags;
+ long timeo;
+ /*
+ * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
+@@ -183,8 +214,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ * Look at current nfs client by the way...
+ * However, this function was correct in any case. 8)
+ */
+- unsigned long cpu_flags;
+- struct sk_buff_head *queue = &sk->sk_receive_queue;
+ int _off = *off;
+
+ last = (struct sk_buff *)queue;
+@@ -198,7 +227,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ _off -= skb->len;
+ continue;
+ }
+- skb->peeked = 1;
++
++ error = skb_set_peeked(skb);
++ if (error)
++ goto unlock_err;
++
+ atomic_inc(&skb->users);
+ } else
+ __skb_unlink(skb, queue);
+@@ -222,6 +255,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+
+ return NULL;
+
++unlock_err:
++ spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ no_packet:
+ *err = error;
+ return NULL;
+@@ -301,7 +336,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
kfree_skb(skb);
@@ -110645,6 +111405,21 @@ index cf999e0..c59a9754 100644
}
}
EXPORT_SYMBOL(dev_load);
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 15b6792..0d70357 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -282,7 +282,9 @@ void dst_release(struct dst_entry *dst)
+ int newrefcnt;
+
+ newrefcnt = atomic_dec_return(&dst->__refcnt);
+- WARN_ON(newrefcnt < 0);
++ if (unlikely(newrefcnt < 0))
++ net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
++ __func__, dst, newrefcnt);
+ if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+ call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ }
diff --git a/net/core/filter.c b/net/core/filter.c
index ebce437..9fed9d0 100644
--- a/net/core/filter.c
@@ -110942,7 +111717,7 @@ index ca68d32..236499d 100644
pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 8aadd6a..adf3f59 100644
+index 8aadd6a..5063f2a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -58,7 +58,7 @@ struct rtnl_link {
@@ -110980,7 +111755,25 @@ index 8aadd6a..adf3f59 100644
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
-@@ -2006,6 +2009,10 @@ replay:
+@@ -1605,10 +1608,13 @@ static int do_setlink(const struct sk_buff *skb,
+ goto errout;
+
+ nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
+- if (nla_type(attr) != IFLA_VF_PORT)
+- continue;
+- err = nla_parse_nested(port, IFLA_PORT_MAX,
+- attr, ifla_port_policy);
++ if (nla_type(attr) != IFLA_VF_PORT ||
++ nla_len(attr) < NLA_HDRLEN) {
++ err = -EINVAL;
++ goto errout;
++ }
++ err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
++ ifla_port_policy);
+ if (err < 0)
+ goto errout;
+ if (!port[IFLA_PORT_VF]) {
+@@ -2006,6 +2012,10 @@ replay:
if (IS_ERR(dest_net))
return PTR_ERR(dest_net);
@@ -110991,7 +111784,7 @@ index 8aadd6a..adf3f59 100644
dev = rtnl_create_link(dest_net, ifname, ops, tb);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
-@@ -2693,6 +2700,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2693,6 +2703,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -111001,7 +111794,7 @@ index 8aadd6a..adf3f59 100644
have_flags = true;
flags = nla_get_u16(attr);
break;
-@@ -2763,6 +2773,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2763,6 +2776,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
@@ -115635,6 +116428,35 @@ index 8e3cf49..4a8e322 100644
}
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index ba5bc92..1232a25 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -167,6 +167,15 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
+ return idx;
+ }
+
++static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
++{
++ unsigned int prev_backlog;
++
++ prev_backlog = sch->qstats.backlog;
++ fq_codel_drop(sch);
++ return prev_backlog - sch->qstats.backlog;
++}
++
+ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+@@ -600,7 +609,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
+ .enqueue = fq_codel_enqueue,
+ .dequeue = fq_codel_dequeue,
+ .peek = qdisc_peek_dequeued,
+- .drop = fq_codel_drop,
++ .drop = fq_codel_qdisc_drop,
+ .init = fq_codel_init,
+ .reset = fq_codel_reset,
+ .destroy = fq_codel_destroy,
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2b1738e..a9d0fc9 100644
--- a/net/sctp/ipv6.c
@@ -120467,10 +121289,10 @@ index 0000000..0c96d8a
+}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
-index 0000000..da184c5
+index 0000000..c5de280
--- /dev/null
+++ b/tools/gcc/constify_plugin.c
-@@ -0,0 +1,564 @@
+@@ -0,0 +1,568 @@
+/*
+ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
+ * Copyright 2011-2015 by PaX Team <pageexec@freemail.hu>
@@ -120912,7 +121734,7 @@ index 0000000..da184c5
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION >= 4009
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -120937,7 +121759,11 @@ index 0000000..da184c5
+class check_local_variables_pass : public gimple_opt_pass {
+public:
+ check_local_variables_pass() : gimple_opt_pass(check_local_variables_pass_data, g) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return check_local_variables(); }
++#else
+ unsigned int execute() { return check_local_variables(); }
++#endif
+};
+}
+
@@ -121037,10 +121863,10 @@ index 0000000..da184c5
+}
diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
new file mode 100644
-index 0000000..1d20e32
+index 0000000..70924d4
--- /dev/null
+++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,689 @@
+@@ -0,0 +1,787 @@
+#ifndef GCC_COMMON_H_INCLUDED
+#define GCC_COMMON_H_INCLUDED
+
@@ -121119,6 +121945,8 @@ index 0000000..1d20e32
+#include "tree-flow.h"
+#else
+#include "tree-cfgcleanup.h"
++#include "tree-ssa-operands.h"
++#include "tree-into-ssa.h"
+#endif
+
+#if BUILDING_GCC_VERSION >= 4008
@@ -121449,6 +122277,76 @@ index 0000000..1d20e32
+typedef union gimple_statement_d gdebug;
+typedef union gimple_statement_d gphi;
+typedef union gimple_statement_d greturn;
++
++static inline gasm *as_a_gasm(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gasm *as_a_const_gasm(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gassign *as_a_gassign(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gassign *as_a_const_gassign(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gcall *as_a_gcall(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gcall *as_a_const_gcall(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gcond *as_a_gcond(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gcond *as_a_const_gcond(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gdebug *as_a_gdebug(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gphi *as_a_gphi(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gphi *as_a_const_gphi(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline greturn *as_a_greturn(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const greturn *as_a_const_greturn(const_gimple stmt)
++{
++ return stmt;
++}
+#endif
+
+#if BUILDING_GCC_VERSION == 4008
@@ -121468,34 +122366,35 @@ index 0000000..1d20e32
+#if BUILDING_GCC_VERSION <= 4009
+#define TODO_verify_il 0
+#define AVAIL_INTERPOSABLE AVAIL_OVERWRITABLE
-+#endif
+
-+#if BUILDING_GCC_VERSION == 4009
-+typedef struct gimple_statement_base gasm;
-+typedef struct gimple_statement_base gassign;
-+typedef struct gimple_statement_base gcall;
-+typedef struct gimple_statement_base gcond;
-+typedef struct gimple_statement_base gdebug;
-+typedef struct gimple_statement_base gphi;
-+typedef struct gimple_statement_base greturn;
-+#endif
++#define section_name_prefix LTO_SECTION_NAME_PREFIX
++#define fatal_error(loc, gmsgid, ...) fatal_error((gmsgid), __VA_ARGS__)
+
-+#if BUILDING_GCC_VERSION <= 4009
+typedef struct rtx_def rtx_insn;
+
+static inline void set_decl_section_name(tree node, const char *value)
+{
+ DECL_SECTION_NAME(node) = build_string(strlen(value) + 1, value);
+}
++#endif
++
++#if BUILDING_GCC_VERSION == 4009
++typedef struct gimple_statement_asm gasm;
++typedef struct gimple_statement_base gassign;
++typedef struct gimple_statement_call gcall;
++typedef struct gimple_statement_base gcond;
++typedef struct gimple_statement_base gdebug;
++typedef struct gimple_statement_phi gphi;
++typedef struct gimple_statement_base greturn;
+
+static inline gasm *as_a_gasm(gimple stmt)
+{
-+ return stmt;
++ return as_a<gasm>(stmt);
+}
+
+static inline const gasm *as_a_const_gasm(const_gimple stmt)
+{
-+ return stmt;
++ return as_a<const gasm>(stmt);
+}
+
+static inline gassign *as_a_gassign(gimple stmt)
@@ -121510,24 +122409,44 @@ index 0000000..1d20e32
+
+static inline gcall *as_a_gcall(gimple stmt)
+{
-+ return stmt;
++ return as_a<gcall>(stmt);
+}
+
+static inline const gcall *as_a_const_gcall(const_gimple stmt)
+{
++ return as_a<const gcall>(stmt);
++}
++
++static inline gcond *as_a_gcond(gimple stmt)
++{
+ return stmt;
+}
+
-+static inline gphi *as_a_gphi(gimple stmt)
++static inline const gcond *as_a_const_gcond(const_gimple stmt)
+{
+ return stmt;
+}
+
-+static inline const gphi *as_a_const_gphi(const_gimple stmt)
++static inline gdebug *as_a_gdebug(gimple stmt)
+{
+ return stmt;
+}
+
++static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gphi *as_a_gphi(gimple stmt)
++{
++ return as_a<gphi>(stmt);
++}
++
++static inline const gphi *as_a_const_gphi(const_gimple stmt)
++{
++ return as_a<const gphi>(stmt);
++}
++
+static inline greturn *as_a_greturn(gimple stmt)
+{
+ return stmt;
@@ -121589,6 +122508,11 @@ index 0000000..1d20e32
+ varpool_node::add(decl);
+}
+
++static inline unsigned int rebuild_cgraph_edges(void)
++{
++ return cgraph_edge::rebuild_edges();
++}
++
+static inline cgraph_node_ptr cgraph_function_node(cgraph_node_ptr node, enum availability *availability)
+{
+ return node->function_symbol(availability);
@@ -122973,10 +123897,10 @@ index 0000000..ac6f9b4
+}
diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
new file mode 100644
-index 0000000..713be61
+index 0000000..40dcfa9
--- /dev/null
+++ b/tools/gcc/randomize_layout_plugin.c
-@@ -0,0 +1,918 @@
+@@ -0,0 +1,922 @@
+/*
+ * Copyright 2014,2015 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
+ * and PaX Team <pageexec@freemail.hu>
@@ -123822,7 +124746,11 @@ index 0000000..713be61
+class randomize_layout_bad_cast : public gimple_opt_pass {
+public:
+ randomize_layout_bad_cast() : gimple_opt_pass(randomize_layout_bad_cast_data, g) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return find_bad_casts(); }
++#else
+ unsigned int execute() { return find_bad_casts(); }
++#endif
+};
+}
+#endif
@@ -124039,15 +124967,16 @@ index 0000000..12b1e3b
+exit 0
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
new file mode 100644
-index 0000000..c43901f
+index 0000000..495983ff
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
-@@ -0,0 +1,748 @@
+@@ -0,0 +1,762 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -124065,8 +124994,8 @@ index 0000000..c43901f
+#include "gcc-common.h"
+#include "size_overflow.h"
+
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
++static void search_size_overflow_attribute(gimple_set *visited, tree lhs);
++static enum mark search_intentional(gimple_set *visited, const_tree lhs);
+
+// data for the size_overflow asm stmt
+struct asm_data {
@@ -124100,7 +125029,7 @@ index 0000000..c43901f
+
+static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
+{
-+ gimple asm_stmt;
++ gasm *asm_stmt;
+ gimple_stmt_iterator gsi;
+#if BUILDING_GCC_VERSION <= 4007
+ VEC(tree, gc) *input, *output = NULL;
@@ -124113,7 +125042,7 @@ index 0000000..c43901f
+ if (asm_data->output)
+ output = create_asm_io_list(str_output, asm_data->output);
+
-+ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
++ asm_stmt = as_a_gasm(gimple_build_asm_vec(str, input, output, NULL, NULL));
+ gsi = gsi_for_stmt(asm_data->def_stmt);
+ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
+
@@ -124128,13 +125057,13 @@ index 0000000..c43901f
+ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
+}
+
-+static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
++static enum mark search_intentional_phi(gimple_set *visited, const_tree result)
+{
+ enum mark cur_fndecl_attr;
-+ gimple phi = get_def_stmt(result);
++ gphi *phi = as_a_gphi(get_def_stmt(result));
+ unsigned int i, n = gimple_phi_num_args(phi);
+
-+ pointer_set_insert(visited, phi);
++ pointer_set_insert(visited, (gimple)phi);
+ for (i = 0; i < n; i++) {
+ tree arg = gimple_phi_arg_def(phi, i);
+
@@ -124145,11 +125074,11 @@ index 0000000..c43901f
+ return MARK_NO;
+}
+
-+static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
++static enum mark search_intentional_binary(gimple_set *visited, const_tree lhs)
+{
+ enum mark cur_fndecl_attr;
+ const_tree rhs1, rhs2;
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+
+ rhs1 = gimple_assign_rhs1(def_stmt);
+ rhs2 = gimple_assign_rhs2(def_stmt);
@@ -124161,7 +125090,7 @@ index 0000000..c43901f
+}
+
+// Look up the intentional_overflow attribute on the caller and the callee functions.
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
++static enum mark search_intentional(gimple_set *visited, const_tree lhs)
+{
+ const_gimple def_stmt;
+
@@ -124179,7 +125108,7 @@ index 0000000..c43901f
+ case GIMPLE_NOP:
+ return search_intentional(visited, SSA_NAME_VAR(lhs));
+ case GIMPLE_ASM:
-+ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
++ if (is_size_overflow_intentional_asm_turn_off(as_a_const_gasm(def_stmt)))
+ return MARK_TURN_OFF;
+ return MARK_NO;
+ case GIMPLE_CALL:
@@ -124189,7 +125118,7 @@ index 0000000..c43901f
+ case GIMPLE_ASSIGN:
+ switch (gimple_num_ops(def_stmt)) {
+ case 2:
-+ return search_intentional(visited, gimple_assign_rhs1(def_stmt));
++ return search_intentional(visited, gimple_assign_rhs1(as_a_const_gassign(def_stmt)));
+ case 3:
+ return search_intentional_binary(visited, lhs);
+ }
@@ -124206,7 +125135,7 @@ index 0000000..c43901f
+static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
+{
+ const_tree fndecl;
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
+
+ fndecl = get_interesting_orig_fndecl(stmt, argnum);
@@ -124249,7 +125178,7 @@ index 0000000..c43901f
+ is_missing_function(orig_fndecl, num);
+}
+
-+static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
++static void search_size_overflow_attribute_phi(gimple_set *visited, const_tree result)
+{
+ gimple phi = get_def_stmt(result);
+ unsigned int i, n = gimple_phi_num_args(phi);
@@ -124262,7 +125191,7 @@ index 0000000..c43901f
+ }
+}
+
-+static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
++static void search_size_overflow_attribute_binary(gimple_set *visited, const_tree lhs)
+{
+ const_gimple def_stmt = get_def_stmt(lhs);
+ tree rhs1, rhs2;
@@ -124274,7 +125203,7 @@ index 0000000..c43901f
+ search_size_overflow_attribute(visited, rhs2);
+}
+
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
++static void search_size_overflow_attribute(gimple_set *visited, tree lhs)
+{
+ const_gimple def_stmt;
+
@@ -124324,18 +125253,20 @@ index 0000000..c43901f
+{
+ tree fndecl = NULL_TREE;
+ tree lhs;
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
+ return;
+
+ if (num == 0) {
+ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
-+ lhs = gimple_return_retval(stmt);
++ lhs = gimple_return_retval(as_a_const_greturn(stmt));
+ } else {
-+ gcc_assert(is_gimple_call(stmt));
-+ lhs = gimple_call_arg(stmt, num - 1);
-+ fndecl = gimple_call_fndecl(stmt);
++ const gcall *call = as_a_const_gcall(stmt);
++
++ gcc_assert(is_gimple_call(call));
++ lhs = gimple_call_arg(call, num - 1);
++ fndecl = gimple_call_fndecl(call);
+ }
+
+ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
@@ -124359,9 +125290,9 @@ index 0000000..c43901f
+ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
+ asm_data->output = make_ssa_name(asm_data->output, stmt);
+ if (gimple_code(stmt) == GIMPLE_RETURN)
-+ gimple_return_set_retval(stmt, asm_data->output);
++ gimple_return_set_retval(as_a_greturn(stmt), asm_data->output);
+ else
-+ gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
++ gimple_call_set_arg(as_a_gcall(stmt), argnum - 1, asm_data->output);
+ update_stmt(stmt);
+}
+
@@ -124441,7 +125372,7 @@ index 0000000..c43901f
+ break;
+ }
+ case GIMPLE_ASM:
-+ if (is_size_overflow_asm(asm_data->def_stmt)) {
++ if (is_size_overflow_asm(as_a_const_gasm(asm_data->def_stmt))) {
+ asm_data->input = NULL_TREE;
+ break;
+ }
@@ -124472,7 +125403,7 @@ index 0000000..c43901f
+ search_missing_size_overflow_attribute_gimple(stmt, argnum);
+
+ asm_data.def_stmt = get_def_stmt(asm_data.output);
-+ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
++ if (gimple_code(asm_data.def_stmt) == GIMPLE_ASM && is_size_overflow_intentional_asm_turn_off(as_a_const_gasm(asm_data.def_stmt)))
+ return;
+
+ create_asm_input(stmt, argnum, &asm_data);
@@ -124522,7 +125453,7 @@ index 0000000..c43901f
+ return true;
+}
+
-+static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
++static void walk_use_def_ptr(gimple_set *visited, const_tree lhs)
+{
+ gimple def_stmt;
+
@@ -124539,28 +125470,33 @@ index 0000000..c43901f
+ case GIMPLE_CALL:
+ break;
+ case GIMPLE_PHI: {
-+ unsigned int i, n = gimple_phi_num_args(def_stmt);
++ gphi *phi = as_a_gphi(def_stmt);
++ unsigned int i, n = gimple_phi_num_args(phi);
+
+ pointer_set_insert(visited, def_stmt);
+
+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(def_stmt, i);
++ tree arg = gimple_phi_arg_def(phi, i);
+
+ walk_use_def_ptr(visited, arg);
+ }
++ break;
+ }
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
++ case GIMPLE_ASSIGN: {
++ gassign *assign = as_a_gassign(def_stmt);
++
++ switch (gimple_num_ops(assign)) {
+ case 2:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
++ walk_use_def_ptr(visited, gimple_assign_rhs1(assign));
+ return;
+ case 3:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
++ walk_use_def_ptr(visited, gimple_assign_rhs1(assign));
++ walk_use_def_ptr(visited, gimple_assign_rhs2(assign));
+ return;
+ default:
+ return;
+ }
++ }
+ default:
+ debug_gimple_stmt((gimple)def_stmt);
+ error("%s: unknown gimple code", __func__);
@@ -124571,7 +125507,7 @@ index 0000000..c43901f
+// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
+static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
+{
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ visited = pointer_set_create();
+ walk_use_def_ptr(visited, arg);
@@ -124579,7 +125515,7 @@ index 0000000..c43901f
+}
+
+// Determine the return value and insert the asm stmt to mark the return stmt.
-+static void insert_asm_ret(gimple stmt)
++static void insert_asm_ret(greturn *stmt)
+{
+ tree ret;
+
@@ -124588,7 +125524,7 @@ index 0000000..c43901f
+}
+
+// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
-+static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
++static void insert_asm_arg(gcall *stmt, unsigned int orig_argnum)
+{
+ tree arg;
+ unsigned int argnum;
@@ -124665,7 +125601,7 @@ index 0000000..c43901f
+ * Look up the intentional_overflow attribute that turns off ipa based duplication
+ * on the callee function.
+ */
-+static bool is_mark_turn_off_attribute(gimple stmt)
++static bool is_mark_turn_off_attribute(gcall *stmt)
+{
+ enum mark mark;
+ const_tree fndecl = gimple_call_fndecl(stmt);
@@ -124677,7 +125613,7 @@ index 0000000..c43901f
+}
+
+// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
-+static void handle_interesting_function(gimple stmt)
++static void handle_interesting_function(gcall *stmt)
+{
+ unsigned int argnum;
+ tree fndecl;
@@ -124703,7 +125639,7 @@ index 0000000..c43901f
+}
+
+// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
-+static void handle_interesting_ret(gimple stmt)
++static void handle_interesting_ret(greturn *stmt)
+{
+ bool orig_argnums[MAX_PARAM + 1] = {false};
+
@@ -124724,13 +125660,13 @@ index 0000000..c43901f
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gimple stmt = gsi_stmt(gsi);
+
-+ if (is_size_overflow_asm(stmt))
++ if (gimple_code(stmt) == GIMPLE_ASM && is_size_overflow_asm(as_a_const_gasm(stmt)))
+ continue;
+
+ if (is_gimple_call(stmt))
-+ handle_interesting_function(stmt);
++ handle_interesting_function(as_a_gcall(stmt));
+ else if (gimple_code(stmt) == GIMPLE_RETURN)
-+ handle_interesting_ret(stmt);
++ handle_interesting_ret(as_a_greturn(stmt));
+ }
+ }
+ return 0;
@@ -124742,6 +125678,7 @@ index 0000000..c43901f
+ * that the ipa pass will detect and insert the size overflow checks for.
+ */
+#if BUILDING_GCC_VERSION >= 4009
++namespace {
+static const struct pass_data insert_size_overflow_asm_pass_data = {
+#else
+static struct gimple_opt_pass insert_size_overflow_asm_pass = {
@@ -124752,7 +125689,8 @@ index 0000000..c43901f
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
-+#if BUILDING_GCC_VERSION >= 4009
++#if BUILDING_GCC_VERSION >= 5000
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -124774,34 +125712,39 @@ index 0000000..c43901f
+};
+
+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
+class insert_size_overflow_asm_pass : public gimple_opt_pass {
+public:
+ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return search_interesting_functions(); }
++#else
+ unsigned int execute() { return search_interesting_functions(); }
++#endif
+};
+}
-+#endif
+
-+struct opt_pass *make_insert_size_overflow_asm_pass(void)
++opt_pass *make_insert_size_overflow_asm_pass(void)
+{
-+#if BUILDING_GCC_VERSION >= 4009
+ return new insert_size_overflow_asm_pass();
++}
+#else
++struct opt_pass *make_insert_size_overflow_asm_pass(void)
++{
+ return &insert_size_overflow_asm_pass.pass;
-+#endif
+}
++#endif
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
new file mode 100644
-index 0000000..73f0a12
+index 0000000..0766e39
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
-@@ -0,0 +1,943 @@
+@@ -0,0 +1,931 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -124865,19 +125808,6 @@ index 0000000..73f0a12
+ return new_type;
+}
+
-+static tree get_lhs(const_gimple stmt)
-+{
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ return gimple_get_lhs(stmt);
-+ case GIMPLE_PHI:
-+ return gimple_phi_result(stmt);
-+ default:
-+ return NULL_TREE;
-+ }
-+}
-+
+static tree cast_to_new_size_overflow_type(struct visited *visited, gimple stmt, tree rhs, tree size_overflow_type, bool before)
+{
+ gimple_stmt_iterator gsi;
@@ -124951,7 +125881,7 @@ index 0000000..73f0a12
+ return cast_to_new_size_overflow_type(visited, oldstmt, rhs1, dst_type, before);
+}
+
-+tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
++tree dup_assign(struct visited *visited, gassign *oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
+{
+ gimple stmt;
+ gimple_stmt_iterator gsi;
@@ -125020,13 +125950,14 @@ index 0000000..73f0a12
+ assign = build_cast_stmt(visited, size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
+
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
+static tree use_phi_ssa_name(struct visited *visited, tree ssa_name_var, tree new_arg)
+{
+ gimple_stmt_iterator gsi;
-+ gimple assign, def_stmt = get_def_stmt(new_arg);
++ gimple assign;
++ gimple def_stmt = get_def_stmt(new_arg);
+
+ if (gimple_code(def_stmt) == GIMPLE_PHI) {
+ gsi = gsi_after_labels(gimple_bb(def_stmt));
@@ -125037,7 +125968,7 @@ index 0000000..73f0a12
+ }
+
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
+static tree cast_visited_phi_arg(struct visited *visited, tree ssa_name_var, tree arg, tree size_overflow_type)
@@ -125054,13 +125985,12 @@ index 0000000..73f0a12
+
+ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
-+static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
++static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gphi *oldstmt, unsigned int i)
+{
-+ tree size_overflow_type;
-+ tree arg;
++ tree size_overflow_type, arg;
+ const_gimple def_stmt;
+
+ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
@@ -125077,7 +126007,7 @@ index 0000000..73f0a12
+ case GIMPLE_NOP: {
+ basic_block bb;
+
-+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
++ bb = gimple_phi_arg_edge(as_a_gphi(oldstmt), i)->src;
+ return cast_parm_decl(visited, ssa_name_var, arg, size_overflow_type, bb);
+ }
+ case GIMPLE_ASM: {
@@ -125087,7 +126017,7 @@ index 0000000..73f0a12
+ gsi = gsi_for_stmt(stmt);
+ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+ }
+ default:
+ gcc_assert(new_arg != NULL_TREE);
@@ -125096,10 +126026,10 @@ index 0000000..73f0a12
+ }
+}
+
-+static gimple overflow_create_phi_node(struct visited *visited, gimple oldstmt, tree result)
++static gphi *overflow_create_phi_node(struct visited *visited, gphi *oldstmt, tree result)
+{
+ basic_block bb;
-+ gimple phi;
++ gphi *phi;
+ gimple_seq seq;
+ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
+
@@ -125112,7 +126042,7 @@ index 0000000..73f0a12
+ result = create_new_var(size_overflow_type);
+ }
+
-+ phi = create_phi_node(result, bb);
++ phi = as_a_gphi(create_phi_node(result, bb));
+ gimple_phi_set_result(phi, make_ssa_name(result, phi));
+ seq = phi_nodes(bb);
+ gsi = gsi_last(seq);
@@ -125125,12 +126055,12 @@ index 0000000..73f0a12
+}
+
+#if BUILDING_GCC_VERSION <= 4007
-+static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
++static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gphi *oldstmt)
+#else
-+static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
++static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gphi *oldstmt)
+#endif
+{
-+ gimple new_phi;
++ gphi *new_phi;
+ unsigned int i;
+ tree arg, result;
+ location_t loc = gimple_location(oldstmt);
@@ -125172,7 +126102,7 @@ index 0000000..73f0a12
+#else
+ vec<tree, va_heap, vl_embed> *args = NULL;
+#endif
-+ gimple oldstmt = get_def_stmt(orig_result);
++ gphi *oldstmt = as_a_gphi(get_def_stmt(orig_result));
+ unsigned int i, len = gimple_phi_num_args(oldstmt);
+
+ pointer_set_insert(visited->stmts, oldstmt);
@@ -125205,7 +126135,7 @@ index 0000000..73f0a12
+#endif
+}
+
-+static tree create_cast_assign(struct visited *visited, gimple stmt)
++static tree create_cast_assign(struct visited *visited, gassign *stmt)
+{
+ tree rhs1 = gimple_assign_rhs1(stmt);
+ tree lhs = gimple_assign_lhs(stmt);
@@ -125218,7 +126148,7 @@ index 0000000..73f0a12
+ return create_assign(visited, stmt, rhs1, AFTER_STMT);
+}
+
-+static bool skip_lhs_cast_check(const_gimple stmt)
++static bool skip_lhs_cast_check(const gassign *stmt)
+{
+ const_tree rhs = gimple_assign_rhs1(stmt);
+ const_gimple def_stmt = get_def_stmt(rhs);
@@ -125252,7 +126182,7 @@ index 0000000..73f0a12
+
+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
+{
-+ gimple cond_stmt;
++ gcond *cond_stmt;
+ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
+
+ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
@@ -125262,7 +126192,7 @@ index 0000000..73f0a12
+
+static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
+{
-+ gimple func_stmt;
++ gcall *func_stmt;
+ const_gimple def_stmt;
+ const_tree loc_line;
+ tree loc_file, ssa_name, current_func;
@@ -125300,7 +126230,7 @@ index 0000000..73f0a12
+ ssa_name = create_string_param(ssa_name);
+
+ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
-+ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
++ func_stmt = as_a_gcall(gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name));
+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
+
+ callee_node = cgraph_get_create_node(report_size_overflow_decl);
@@ -125384,7 +126314,7 @@ index 0000000..73f0a12
+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
+}
+
-+static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
++static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gassign *stmt)
+{
+ bool cast_lhs, cast_rhs;
+ tree lhs = gimple_assign_lhs(stmt);
@@ -125437,7 +126367,7 @@ index 0000000..73f0a12
+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+}
+
-+static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gassign *stmt)
+{
+ enum tree_code rhs_code;
+ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
@@ -125472,10 +126402,10 @@ index 0000000..73f0a12
+ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
+}
+
-+static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gassign *stmt)
+{
+ tree rhs1, lhs = gimple_assign_lhs(stmt);
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+
+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
+ rhs1 = gimple_assign_rhs1(def_stmt);
@@ -125534,7 +126464,7 @@ index 0000000..73f0a12
+}
+
+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
-+static bool is_a_ptr_minus(gimple stmt)
++static bool is_a_ptr_minus(gassign *stmt)
+{
+ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
+
@@ -125562,7 +126492,7 @@ index 0000000..73f0a12
+{
+ enum intentional_overflow_type res;
+ tree rhs1, rhs2, new_lhs;
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+ tree new_rhs1 = NULL_TREE;
+ tree new_rhs2 = NULL_TREE;
+
@@ -125603,13 +126533,13 @@ index 0000000..73f0a12
+ res = add_mul_intentional_overflow(def_stmt);
+ if (res != NO_INTENTIONAL_OVERFLOW) {
+ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+ insert_cast_expr(visited, get_def_stmt(new_lhs), res);
++ insert_cast_expr(visited, as_a_gassign(get_def_stmt(new_lhs)), res);
+ return new_lhs;
+ }
+
+ if (skip_expr_on_double_type(def_stmt)) {
+ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+ insert_cast_expr(visited, get_def_stmt(new_lhs), NO_INTENTIONAL_OVERFLOW);
++ insert_cast_expr(visited, as_a_gassign(get_def_stmt(new_lhs)), NO_INTENTIONAL_OVERFLOW);
+ return new_lhs;
+ }
+
@@ -125646,7 +126576,7 @@ index 0000000..73f0a12
+static tree handle_ternary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
+{
+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+
+ size_overflow_type = get_size_overflow_type(visited, def_stmt, lhs);
+
@@ -125725,7 +126655,7 @@ index 0000000..73f0a12
+ case GIMPLE_ASSIGN:
+ switch (gimple_num_ops(def_stmt)) {
+ case 2:
-+ return handle_unary_ops(visited, caller_node, def_stmt);
++ return handle_unary_ops(visited, caller_node, as_a_gassign(def_stmt));
+ case 3:
+ return handle_binary_ops(visited, caller_node, lhs);
+#if BUILDING_GCC_VERSION >= 4006
@@ -125742,16 +126672,17 @@ index 0000000..73f0a12
+
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
new file mode 100644
-index 0000000..df50164
+index 0000000..e1e6e19
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
-@@ -0,0 +1,1141 @@
+@@ -0,0 +1,1157 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ * https://github.com/ephox-gcc-plugins
+ *
+ * Documentation:
+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
@@ -125775,8 +126706,8 @@ index 0000000..df50164
+
+unsigned int call_count;
+
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
++static void set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree lhs);
++static void walk_use_def(gimple_set *visited, struct interesting_node *cur_node, tree lhs);
+
+struct visited_fns {
+ struct visited_fns *next;
@@ -125946,9 +126877,9 @@ index 0000000..df50164
+ return cnodes;
+}
+
-+static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
++static void walk_phi_set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree result)
+{
-+ gimple phi = get_def_stmt(result);
++ gphi *phi = as_a_gphi(get_def_stmt(result));
+ unsigned int i, n = gimple_phi_num_args(phi);
+
+ pointer_set_insert(visited, phi);
@@ -125964,7 +126895,7 @@ index 0000000..df50164
+};
+
+// Search for constants, cast assignments and binary/ternary assignments
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
++static void set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree lhs)
+{
+ gimple def_stmt = get_def_stmt(lhs);
+
@@ -125981,7 +126912,7 @@ index 0000000..df50164
+
+ switch (gimple_code(def_stmt)) {
+ case GIMPLE_CALL:
-+ if (lhs == gimple_call_lhs(def_stmt))
++ if (lhs == gimple_call_lhs(as_a_const_gcall(def_stmt)))
+ interesting_conditions[RET] = true;
+ return;
+ case GIMPLE_NOP:
@@ -125990,11 +126921,13 @@ index 0000000..df50164
+ case GIMPLE_PHI:
+ interesting_conditions[PHI] = true;
+ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
-+ case GIMPLE_ASSIGN:
-+ if (gimple_num_ops(def_stmt) == 2) {
-+ const_tree rhs = gimple_assign_rhs1(def_stmt);
++ case GIMPLE_ASSIGN: {
++ gassign *assign = as_a_gassign(def_stmt);
++
++ if (gimple_num_ops(assign) == 2) {
++ const_tree rhs = gimple_assign_rhs1(assign);
+
-+ if (gimple_assign_cast_p(def_stmt))
++ if (gimple_assign_cast_p(assign))
+ interesting_conditions[CAST] = true;
+
+ return set_conditions(visited, interesting_conditions, rhs);
@@ -126002,6 +126935,7 @@ index 0000000..df50164
+ interesting_conditions[NOT_UNARY] = true;
+ return;
+ }
++ }
+ default:
+ debug_gimple_stmt(def_stmt);
+ gcc_unreachable();
@@ -126011,7 +126945,7 @@ index 0000000..df50164
+// determine whether duplication will be necessary or not.
+static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
+{
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ if (gimple_assign_cast_p(cur_node->first_stmt))
+ interesting_conditions[CAST] = true;
@@ -126024,9 +126958,9 @@ index 0000000..df50164
+}
+
+// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
-+static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
++static void replace_size_overflow_asm_with_assign(gasm *asm_stmt, tree lhs, tree rhs)
+{
-+ gimple assign;
++ gassign *assign;
+ gimple_stmt_iterator gsi;
+
+ // already removed
@@ -126067,13 +127001,13 @@ index 0000000..df50164
+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+ return false;
+
-+ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
++ def_stmt = get_def_stmt(gimple_assign_rhs1(as_a_gassign(def_stmt)));
+ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
+}
+
-+static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
++static void walk_use_def_phi(gimple_set *visited, struct interesting_node *cur_node, tree result)
+{
-+ gimple phi = get_def_stmt(result);
++ gphi *phi = as_a_gphi(get_def_stmt(result));
+ unsigned int i, n = gimple_phi_num_args(phi);
+
+ pointer_set_insert(visited, phi);
@@ -126084,9 +127018,9 @@ index 0000000..df50164
+ }
+}
+
-+static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++static void walk_use_def_binary(gimple_set *visited, struct interesting_node *cur_node, tree lhs)
+{
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+ tree rhs1, rhs2;
+
+ rhs1 = gimple_assign_rhs1(def_stmt);
@@ -126135,16 +127069,16 @@ index 0000000..df50164
+}
+
+// a size_overflow asm stmt in the control flow doesn't stop the recursion
-+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
++static void handle_asm_stmt(gimple_set *visited, struct interesting_node *cur_node, tree lhs, const gasm *stmt)
+{
-+ if (!is_size_overflow_asm(stmt))
++ if (gimple_code(stmt) != GIMPLE_ASM || !is_size_overflow_asm(stmt))
+ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+}
+
+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
+ * and component refs (for checking the intentional_overflow attribute).
+ */
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++static void walk_use_def(gimple_set *visited, struct interesting_node *cur_node, tree lhs)
+{
+ const_gimple def_stmt;
+
@@ -126164,9 +127098,9 @@ index 0000000..df50164
+ case GIMPLE_NOP:
+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+ case GIMPLE_ASM:
-+ return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
++ return handle_asm_stmt(visited, cur_node, lhs, as_a_const_gasm(def_stmt));
+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(def_stmt);
++ tree fndecl = gimple_call_fndecl(as_a_const_gcall(def_stmt));
+
+ if (fndecl == NULL_TREE)
+ return;
@@ -126178,7 +127112,7 @@ index 0000000..df50164
+ case GIMPLE_ASSIGN:
+ switch (gimple_num_ops(def_stmt)) {
+ case 2:
-+ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
++ return walk_use_def(visited, cur_node, gimple_assign_rhs1(as_a_const_gassign(def_stmt)));
+ case 3:
+ return walk_use_def_binary(visited, cur_node, lhs);
+ }
@@ -126192,7 +127126,7 @@ index 0000000..df50164
+// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
+static void set_last_nodes(struct interesting_node *cur_node)
+{
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ visited = pointer_set_create();
+ walk_use_def(visited, cur_node, cur_node->node);
@@ -126245,7 +127179,7 @@ index 0000000..df50164
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+
+ assign = build_cast_stmt(visited, orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
+static void change_orig_node(struct visited *visited, struct interesting_node *cur_node, tree new_node)
@@ -126256,10 +127190,10 @@ index 0000000..df50164
+
+ switch (gimple_code(stmt)) {
+ case GIMPLE_RETURN:
-+ gimple_return_set_retval(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ gimple_return_set_retval(as_a_greturn(stmt), cast_to_orig_type(visited, stmt, orig_node, new_node));
+ break;
+ case GIMPLE_CALL:
-+ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ gimple_call_set_arg(as_a_gcall(stmt), cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
+ break;
+ case GIMPLE_ASSIGN:
+ switch (cur_node->num) {
@@ -126278,7 +127212,7 @@ index 0000000..df50164
+ gcc_unreachable();
+ }
+
-+ set_rhs(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ set_rhs(as_a_gassign(stmt), cast_to_orig_type(visited, stmt, orig_node, new_node));
+ break;
+ default:
+ debug_gimple_stmt(stmt);
@@ -126365,7 +127299,7 @@ index 0000000..df50164
+ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
+ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
+ */
-+static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
++static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gasm *asm_stmt)
+{
+ struct interesting_node *new_node;
+ tree fndecl;
@@ -126385,7 +127319,7 @@ index 0000000..df50164
+ return head;
+
+ if (is_gimple_call(first_stmt))
-+ fndecl = gimple_call_fndecl(first_stmt);
++ fndecl = gimple_call_fndecl(as_a_const_gcall(first_stmt));
+ else
+ fndecl = current_function_decl;
+
@@ -126421,7 +127355,7 @@ index 0000000..df50164
+/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
+ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, greturn *stmt, struct next_cgraph_node *next_node)
+{
+ struct next_cgraph_node *cur_node;
+ tree ret = gimple_return_retval(stmt);
@@ -126442,7 +127376,7 @@ index 0000000..df50164
+/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
+ * If the call stmt is in the next cgraph node list then it's an interesting call.
+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gcall *stmt, struct next_cgraph_node *next_node)
+{
+ unsigned int argnum;
+ tree arg;
@@ -126478,7 +127412,7 @@ index 0000000..df50164
+}
+
+// Get the index of the rhs node in an assignment
-+static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
++static unsigned int get_assign_ops_count(const gassign *stmt, tree node)
+{
+ const_tree rhs1, rhs2;
+ unsigned int ret;
@@ -126506,7 +127440,7 @@ index 0000000..df50164
+}
+
+// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
-+static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
++static unsigned int find_arg_number_gimple(const_tree arg, const gcall *stmt)
+{
+ unsigned int i;
+
@@ -126529,7 +127463,7 @@ index 0000000..df50164
+/* starting from the size_overflow asm stmt collect interesting stmts. They can be
+ * any of return, call or assignment stmts (because of inlining).
+ */
-+static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
++static struct interesting_node *get_interesting_ret_or_call(tree_set *visited, struct interesting_node *head, tree node, gasm *intentional_asm)
+{
+ use_operand_p use_p;
+ imm_use_iterator imm_iter;
@@ -126550,28 +127484,31 @@ index 0000000..df50164
+
+ switch (gimple_code(stmt)) {
+ case GIMPLE_CALL:
-+ argnum = find_arg_number_gimple(node, stmt);
++ argnum = find_arg_number_gimple(node, as_a_gcall(stmt));
+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
+ break;
+ case GIMPLE_RETURN:
+ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
+ break;
+ case GIMPLE_ASSIGN:
-+ argnum = get_assign_ops_count(stmt, node);
++ argnum = get_assign_ops_count(as_a_const_gassign(stmt), node);
+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
+ break;
+ case GIMPLE_PHI: {
-+ tree result = gimple_phi_result(stmt);
++ tree result = gimple_phi_result(as_a_gphi(stmt));
+ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
+ break;
+ }
-+ case GIMPLE_ASM:
-+ if (gimple_asm_noutputs(stmt) != 0)
++ case GIMPLE_ASM: {
++ gasm *asm_stmt = as_a_gasm(stmt);
++
++ if (gimple_asm_noutputs(asm_stmt) != 0)
+ break;
-+ if (!is_size_overflow_asm(stmt))
++ if (!is_size_overflow_asm(asm_stmt))
+ break;
-+ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
++ head = create_new_interesting_node(head, asm_stmt, node, 1, intentional_asm);
+ break;
++ }
+ case GIMPLE_COND:
+ case GIMPLE_SWITCH:
+ break;
@@ -126586,66 +127523,71 @@ index 0000000..df50164
+
+static void remove_size_overflow_asm(gimple stmt)
+{
++ gasm *asm_stmt;
+ gimple_stmt_iterator gsi;
+ tree input, output;
+
-+ if (!is_size_overflow_asm(stmt))
++ if (gimple_code(stmt) != GIMPLE_ASM)
+ return;
+
-+ if (gimple_asm_noutputs(stmt) == 0) {
-+ gsi = gsi_for_stmt(stmt);
-+ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
++ asm_stmt = as_a_gasm(stmt);
++ if (!is_size_overflow_asm(asm_stmt))
++ return;
++
++ if (gimple_asm_noutputs(asm_stmt) == 0) {
++ gsi = gsi_for_stmt(asm_stmt);
++ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), asm_stmt);
+ gsi_remove(&gsi, true);
+ return;
+ }
+
-+ input = gimple_asm_input_op(stmt, 0);
-+ output = gimple_asm_output_op(stmt, 0);
-+ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
++ input = gimple_asm_input_op(asm_stmt, 0);
++ output = gimple_asm_output_op(asm_stmt, 0);
++ replace_size_overflow_asm_with_assign(asm_stmt, TREE_VALUE(output), TREE_VALUE(input));
+}
+
+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
+ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
+ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
+ */
-+static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
++static struct interesting_node *handle_stmt_by_size_overflow_asm(gasm *asm_stmt, struct interesting_node *head)
+{
+ const_tree output;
-+ struct pointer_set_t *visited;
-+ gimple intentional_asm = NOT_INTENTIONAL_ASM;
++ tree_set *visited;
++ gasm *intentional_asm = NOT_INTENTIONAL_ASM;
+
-+ if (!is_size_overflow_asm(stmt))
++ if (!is_size_overflow_asm(asm_stmt))
+ return head;
+
-+ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
-+ intentional_asm = stmt;
++ if (is_size_overflow_intentional_asm_yes(asm_stmt) || is_size_overflow_intentional_asm_turn_off(asm_stmt))
++ intentional_asm = asm_stmt;
+
-+ gcc_assert(gimple_asm_ninputs(stmt) == 1);
++ gcc_assert(gimple_asm_ninputs(asm_stmt) == 1);
+
-+ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ if (gimple_asm_noutputs(asm_stmt) == 0 && is_size_overflow_intentional_asm_turn_off(asm_stmt))
+ return head;
+
-+ if (gimple_asm_noutputs(stmt) == 0) {
++ if (gimple_asm_noutputs(asm_stmt) == 0) {
+ const_tree input;
+
-+ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ if (!is_size_overflow_intentional_asm_turn_off(asm_stmt))
+ return head;
+
-+ input = gimple_asm_input_op(stmt, 0);
-+ remove_size_overflow_asm(stmt);
++ input = gimple_asm_input_op(asm_stmt, 0);
++ remove_size_overflow_asm(asm_stmt);
+ if (is_gimple_constant(TREE_VALUE(input)))
+ return head;
-+ visited = pointer_set_create();
++ visited = tree_pointer_set_create();
+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
+ pointer_set_destroy(visited);
+ return head;
+ }
+
-+ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
-+ remove_size_overflow_asm(stmt);
++ if (!is_size_overflow_intentional_asm_yes(asm_stmt) && !is_size_overflow_intentional_asm_turn_off(asm_stmt))
++ remove_size_overflow_asm(asm_stmt);
+
-+ visited = pointer_set_create();
-+ output = gimple_asm_output_op(stmt, 0);
++ visited = tree_pointer_set_create();
++ output = gimple_asm_output_op(asm_stmt, 0);
+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
+ pointer_set_destroy(visited);
+ return head;
@@ -126669,14 +127611,14 @@ index 0000000..df50164
+ code = gimple_code(stmt);
+
+ if (code == GIMPLE_ASM)
-+ head = handle_stmt_by_size_overflow_asm(stmt, head);
++ head = handle_stmt_by_size_overflow_asm(as_a_gasm(stmt), head);
+
+ if (!next_node)
+ continue;
+ if (code == GIMPLE_CALL)
-+ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
++ head = handle_stmt_by_cgraph_nodes_call(head, as_a_gcall(stmt), next_node);
+ if (code == GIMPLE_RETURN)
-+ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
++ head = handle_stmt_by_cgraph_nodes_ret(head, as_a_greturn(stmt), next_node);
+ }
+ }
+ return head;
@@ -126813,7 +127755,6 @@ index 0000000..df50164
+ struct visited_fns *visited_fns = NULL;
+
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ gcc_assert(cgraph_function_flags_ready);
+#if BUILDING_GCC_VERSION <= 4007
+ gcc_assert(node->reachable);
+#endif
@@ -126826,6 +127767,7 @@ index 0000000..df50164
+}
+
+#if BUILDING_GCC_VERSION >= 4009
++namespace {
+static const struct pass_data insert_size_overflow_check_data = {
+#else
+static struct ipa_opt_pass_d insert_size_overflow_check = {
@@ -126836,7 +127778,8 @@ index 0000000..df50164
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
-+#if BUILDING_GCC_VERSION >= 4009
++#if BUILDING_GCC_VERSION >= 5000
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -126869,36 +127812,40 @@ index 0000000..df50164
+};
+
+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
+class insert_size_overflow_check : public ipa_opt_pass_d {
+public:
+ insert_size_overflow_check() : ipa_opt_pass_d(insert_size_overflow_check_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return search_function(); }
++#else
+ unsigned int execute() { return search_function(); }
++#endif
+};
+}
-+#endif
+
-+struct opt_pass *make_insert_size_overflow_check(void)
++opt_pass *make_insert_size_overflow_check(void)
+{
-+#if BUILDING_GCC_VERSION >= 4009
+ return new insert_size_overflow_check();
++}
+#else
++struct opt_pass *make_insert_size_overflow_check(void)
++{
+ return &insert_size_overflow_check.pass;
-+#endif
+}
-+
++#endif
diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c
new file mode 100644
-index 0000000..d71d72a
+index 0000000..eb62680
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c
-@@ -0,0 +1,736 @@
+@@ -0,0 +1,748 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ * https://github.com/ephox-gcc-plugins
+ *
+ * Documentation:
+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
@@ -126943,7 +127890,7 @@ index 0000000..d71d72a
+ if (param_head == NULL_TREE)
+ return false;
+
-+ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
++ if (tree_to_shwi(TREE_VALUE(param_head)) == -1)
+ return true;
+ return false;
+}
@@ -127135,13 +128082,15 @@ index 0000000..d71d72a
+{
+ const_tree rhs1, lhs, rhs1_type, lhs_type;
+ enum machine_mode lhs_mode, rhs_mode;
++ const gassign *assign;
+ gimple def_stmt = get_def_stmt(no_const_rhs);
+
+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ lhs = gimple_assign_lhs(def_stmt);
++ assign = as_a_const_gassign(def_stmt);
++ rhs1 = gimple_assign_rhs1(assign);
++ lhs = gimple_assign_lhs(assign);
+ rhs1_type = TREE_TYPE(rhs1);
+ lhs_type = TREE_TYPE(lhs);
+ rhs_mode = TYPE_MODE(rhs1_type);
@@ -127165,7 +128114,7 @@ index 0000000..d71d72a
+ return num;
+ if (is_gimple_debug(use_stmt))
+ continue;
-+ if (gimple_assign_cast_p(use_stmt) && is_size_overflow_type(gimple_assign_lhs(use_stmt)))
++ if (gimple_assign_cast_p(use_stmt) && is_size_overflow_type(gimple_assign_lhs(as_a_const_gassign(use_stmt))))
+ continue;
+ num++;
+ }
@@ -127181,12 +128130,14 @@ index 0000000..d71d72a
+bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
+{
+ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
++ gassign *assign;
+ gimple def_stmt = get_def_stmt(lhs);
+
+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
++ assign = as_a_gassign(def_stmt);
++ rhs1 = gimple_assign_rhs1(assign);
+ rhs_type = TREE_TYPE(rhs1);
+ lhs_type = TREE_TYPE(lhs);
+ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
@@ -127198,11 +128149,12 @@ index 0000000..d71d72a
+ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
+ return false;
+
-+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
++ assign = as_a_gassign(def_stmt);
++ if (gimple_assign_rhs_code(assign) != PLUS_EXPR)
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs1 = gimple_assign_rhs1(assign);
++ rhs2 = gimple_assign_rhs2(assign);
+ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
+ return false;
+
@@ -127259,7 +128211,7 @@ index 0000000..d71d72a
+ return false;
+}
+
-+bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
++bool is_a_constant_overflow(const gassign *stmt, const_tree rhs)
+{
+ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
+ return false;
@@ -127273,7 +128225,7 @@ index 0000000..d71d72a
+ return true;
+}
+
-+static tree change_assign_rhs(struct visited *visited, gimple stmt, const_tree orig_rhs, tree new_rhs)
++static tree change_assign_rhs(struct visited *visited, gassign *stmt, const_tree orig_rhs, tree new_rhs)
+{
+ gimple assign;
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
@@ -127283,10 +128235,10 @@ index 0000000..d71d72a
+
+ assign = build_cast_stmt(visited, origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
-+tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
++tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gassign *stmt, tree change_rhs, tree new_rhs2)
+{
+ tree new_rhs, orig_rhs;
+ void (*gimple_assign_set_rhs)(gimple, tree);
@@ -127317,9 +128269,10 @@ index 0000000..d71d72a
+ return create_assign(visited, stmt, lhs, AFTER_STMT);
+}
+
-+static bool is_subtraction_special(struct visited *visited, const_gimple stmt)
++static bool is_subtraction_special(struct visited *visited, const gassign *stmt)
+{
-+ gimple rhs1_def_stmt, rhs2_def_stmt;
++ gimple def_stmt_1, def_stmt_2;
++ const gassign *rhs1_def_stmt, *rhs2_def_stmt;
+ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
+ const_tree rhs1 = gimple_assign_rhs1(stmt);
@@ -127333,15 +128286,18 @@ index 0000000..d71d72a
+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
+ return false;
+
-+ rhs1_def_stmt = get_def_stmt(rhs1);
-+ rhs2_def_stmt = get_def_stmt(rhs2);
-+ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ def_stmt_1 = get_def_stmt(rhs1);
++ def_stmt_2 = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(def_stmt_1) || !gimple_assign_cast_p(def_stmt_2))
+ return false;
+
++ rhs1_def_stmt = as_a_const_gassign(def_stmt_1);
++ rhs2_def_stmt = as_a_const_gassign(def_stmt_2);
+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
+ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
+ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
++
+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
+ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
@@ -127356,15 +128312,15 @@ index 0000000..d71d72a
+ return true;
+}
+
-+static gimple create_binary_assign(struct visited *visited, enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++static gassign *create_binary_assign(struct visited *visited, enum tree_code code, gassign *stmt, tree rhs1, tree rhs2)
+{
-+ gimple assign;
++ gassign *assign;
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+ tree type = TREE_TYPE(rhs1);
+ tree lhs = create_new_var(type);
+
+ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
-+ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ assign = as_a_gassign(gimple_build_assign_with_ops(code, lhs, rhs1, rhs2));
+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
+
+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
@@ -127384,11 +128340,11 @@ index 0000000..d71d72a
+
+ gsi = gsi_for_stmt(stmt);
+ cast_stmt = build_cast_stmt(visited, intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ pointer_set_insert(visited->my_stmts, cast_stmt);
-+ return gimple_assign_lhs(cast_stmt);
++ pointer_set_insert(visited->my_stmts, (gimple)cast_stmt);
++ return get_lhs(cast_stmt);
+}
+
-+static tree get_def_stmt_rhs(struct visited *visited, const_tree var)
++static tree get_def_stmt_rhs(const_tree var)
+{
+ tree rhs1, def_stmt_rhs1;
+ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
@@ -127396,14 +128352,13 @@ index 0000000..d71d72a
+ def_stmt = get_def_stmt(var);
+ if (!gimple_assign_cast_p(def_stmt))
+ return NULL_TREE;
-+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && pointer_set_contains(visited->my_stmts, def_stmt) && gimple_assign_cast_p(def_stmt));
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1 = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
+ rhs1_def_stmt = get_def_stmt(rhs1);
+ if (!gimple_assign_cast_p(rhs1_def_stmt))
+ return rhs1;
+
-+ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ def_stmt_rhs1 = gimple_assign_rhs1(as_a_const_gassign(rhs1_def_stmt));
+ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
+
+ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
@@ -127424,7 +128379,7 @@ index 0000000..d71d72a
+{
+ tree new_rhs1, new_rhs2;
+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
-+ gimple assign, stmt = get_def_stmt(lhs);
++ gassign *assign, *stmt = as_a_gassign(get_def_stmt(lhs));
+ tree rhs1 = gimple_assign_rhs1(stmt);
+ tree rhs2 = gimple_assign_rhs2(stmt);
+
@@ -127434,8 +128389,8 @@ index 0000000..d71d72a
+ new_rhs1 = expand(visited, caller_node, rhs1);
+ new_rhs2 = expand(visited, caller_node, rhs2);
+
-+ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs1);
-+ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs2);
++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
+
+ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
+ return NULL_TREE;
@@ -127478,6 +128433,7 @@ index 0000000..d71d72a
+ const_tree res;
+ tree rhs1, rhs2, def_rhs1, def_rhs2, const_rhs, def_const_rhs;
+ const_gimple def_stmt;
++ const gassign *assign, *def_assign;
+
+ if (!stmt || gimple_code(stmt) == GIMPLE_NOP)
+ return false;
@@ -127486,8 +128442,9 @@ index 0000000..d71d72a
+ if (gimple_assign_rhs_code(stmt) != MULT_EXPR)
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ rhs2 = gimple_assign_rhs2(stmt);
++ assign = as_a_const_gassign(stmt);
++ rhs1 = gimple_assign_rhs1(assign);
++ rhs2 = gimple_assign_rhs2(assign);
+ if (is_gimple_constant(rhs1)) {
+ const_rhs = rhs1;
+ def_stmt = get_def_stmt(rhs2);
@@ -127503,8 +128460,9 @@ index 0000000..d71d72a
+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR && gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
+ return false;
+
-+ def_rhs1 = gimple_assign_rhs1(def_stmt);
-+ def_rhs2 = gimple_assign_rhs2(def_stmt);
++ def_assign = as_a_const_gassign(def_stmt);
++ def_rhs1 = gimple_assign_rhs1(def_assign);
++ def_rhs2 = gimple_assign_rhs2(def_assign);
+ if (is_gimple_constant(def_rhs1))
+ def_const_rhs = def_rhs1;
+ else if (is_gimple_constant(def_rhs2))
@@ -127512,13 +128470,13 @@ index 0000000..d71d72a
+ else
+ return false;
+
-+ res = fold_binary_loc(gimple_location(def_stmt), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs);
++ res = fold_binary_loc(gimple_location(def_assign), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs);
+ if (is_lt_signed_type_max(res) && is_gt_zero(res))
+ return false;
+ return true;
+}
+
-+enum intentional_overflow_type add_mul_intentional_overflow(const_gimple stmt)
++enum intentional_overflow_type add_mul_intentional_overflow(const gassign *stmt)
+{
+ const_gimple def_stmt_1, def_stmt_2;
+ const_tree rhs1, rhs2;
@@ -127584,17 +128542,17 @@ index 0000000..d71d72a
+
+ if (!is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 2)
+ return false;
-+ rhs = gimple_assign_rhs1(def_stmt);
++ rhs = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
+ def_stmt = get_def_stmt(rhs);
+ if (!def_stmt)
+ return false;
+ return is_call_or_cast(def_stmt);
+}
+
-+void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt)
++void unsigned_signed_cast_intentional_overflow(struct visited *visited, gassign *stmt)
+{
+ unsigned int use_num;
-+ gimple so_stmt;
++ gassign *so_stmt;
+ const_gimple def_stmt;
+ const_tree rhs1, rhs2;
+ tree rhs = gimple_assign_rhs1(stmt);
@@ -127615,31 +128573,32 @@ index 0000000..d71d72a
+ if (!is_gimple_assign(def_stmt))
+ return;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1 = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
+ if (!is_unsigned_cast_or_call_def_stmt(rhs1))
+ return;
+
-+ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs2 = gimple_assign_rhs2(as_a_const_gassign(def_stmt));
+ if (!is_unsigned_cast_or_call_def_stmt(rhs2))
+ return;
+ if (gimple_num_ops(def_stmt) == 3 && !is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
+ return;
+
-+ so_stmt = get_dup_stmt(visited, stmt);
++ so_stmt = as_a_gassign(get_dup_stmt(visited, stmt));
+ create_up_and_down_cast(visited, so_stmt, lhs_type, gimple_assign_rhs1(so_stmt));
+}
+
diff --git a/tools/gcc/size_overflow_plugin/misc.c b/tools/gcc/size_overflow_plugin/misc.c
new file mode 100644
-index 0000000..4bddad2
+index 0000000..253b4a8b
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/misc.c
-@@ -0,0 +1,203 @@
+@@ -0,0 +1,219 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -127673,6 +128632,20 @@ index 0000000..4bddad2
+ current_function_decl = NULL_TREE;
+}
+
++tree get_lhs(const_gimple stmt)
++{
++ switch (gimple_code(stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ return gimple_get_lhs(as_a_const_gassign(stmt));
++ case GIMPLE_PHI:
++ return gimple_phi_result(as_a_const_gphi(stmt));
++ default:
++ debug_gimple_stmt((gimple)stmt);
++ gcc_unreachable();
++ }
++}
++
+static bool is_bool(const_tree node)
+{
+ const_tree type;
@@ -127784,7 +128757,8 @@ index 0000000..4bddad2
+
+gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
+{
-+ gimple assign, def_stmt;
++ gimple def_stmt;
++ gassign *assign;
+
+ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
+ gcc_assert(!is_gimple_constant(rhs));
@@ -127840,15 +128814,16 @@ index 0000000..4bddad2
+
diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
new file mode 100644
-index 0000000..7c9e6d1
+index 0000000..de5999d
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
-@@ -0,0 +1,138 @@
+@@ -0,0 +1,139 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -127866,7 +128841,7 @@ index 0000000..7c9e6d1
+#include "gcc-common.h"
+#include "size_overflow.h"
+
-+bool skip_expr_on_double_type(const_gimple stmt)
++bool skip_expr_on_double_type(const gassign *stmt)
+{
+ enum tree_code code = gimple_assign_rhs_code(stmt);
+
@@ -127888,19 +128863,19 @@ index 0000000..7c9e6d1
+ }
+}
+
-+void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs)
++void create_up_and_down_cast(struct visited *visited, gassign *use_stmt, tree orig_type, tree rhs)
+{
+ const_tree orig_rhs1;
+ tree down_lhs, new_lhs, dup_type = TREE_TYPE(rhs);
-+ gimple down_cast, up_cast;
++ const_gimple down_cast, up_cast;
+ gimple_stmt_iterator gsi = gsi_for_stmt(use_stmt);
+
+ down_cast = build_cast_stmt(visited, orig_type, rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ down_lhs = gimple_assign_lhs(down_cast);
++ down_lhs = get_lhs(down_cast);
+
+ gsi = gsi_for_stmt(use_stmt);
+ up_cast = build_cast_stmt(visited, dup_type, down_lhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ new_lhs = gimple_assign_lhs(up_cast);
++ new_lhs = get_lhs(up_cast);
+
+ orig_rhs1 = gimple_assign_rhs1(use_stmt);
+ if (operand_equal_p(orig_rhs1, rhs, 0))
@@ -127944,7 +128919,7 @@ index 0000000..7c9e6d1
+ return new_type;
+}
+
-+static void insert_cast_rhs(struct visited *visited, gimple stmt, tree rhs)
++static void insert_cast_rhs(struct visited *visited, gassign *stmt, tree rhs)
+{
+ tree type;
+
@@ -127959,7 +128934,7 @@ index 0000000..7c9e6d1
+ create_up_and_down_cast(visited, stmt, type, rhs);
+}
+
-+static void insert_cast(struct visited *visited, gimple stmt, tree rhs)
++static void insert_cast(struct visited *visited, gassign *stmt, tree rhs)
+{
+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && !is_size_overflow_type(rhs))
+ return;
@@ -127967,7 +128942,7 @@ index 0000000..7c9e6d1
+ insert_cast_rhs(visited, stmt, rhs);
+}
+
-+void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type)
++void insert_cast_expr(struct visited *visited, gassign *stmt, enum intentional_overflow_type type)
+{
+ tree rhs1, rhs2;
+
@@ -127984,10 +128959,10 @@ index 0000000..7c9e6d1
+
diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h
new file mode 100644
-index 0000000..37f8fc3
+index 0000000..20732b1
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow.h
-@@ -0,0 +1,127 @@
+@@ -0,0 +1,183 @@
+#ifndef SIZE_OVERFLOW_H
+#define SIZE_OVERFLOW_H
+
@@ -128009,11 +128984,66 @@ index 0000000..37f8fc3
+ NO_INTENTIONAL_OVERFLOW, RHS1_INTENTIONAL_OVERFLOW, RHS2_INTENTIONAL_OVERFLOW
+};
+
++
++#if BUILDING_GCC_VERSION >= 5000
++typedef struct hash_set<const_gimple> gimple_set;
++
++static inline bool pointer_set_insert(gimple_set *visited, const_gimple stmt)
++{
++ return visited->add(stmt);
++}
++
++static inline bool pointer_set_contains(gimple_set *visited, const_gimple stmt)
++{
++ return visited->contains(stmt);
++}
++
++static inline gimple_set* pointer_set_create(void)
++{
++ return new hash_set<const_gimple>;
++}
++
++static inline void pointer_set_destroy(gimple_set *visited)
++{
++ delete visited;
++}
++
++typedef struct hash_set<tree> tree_set;
++
++static inline bool pointer_set_insert(tree_set *visited, tree node)
++{
++ return visited->add(node);
++}
++
++static inline bool pointer_set_contains(tree_set *visited, tree node)
++{
++ return visited->contains(node);
++}
++
++static inline tree_set *tree_pointer_set_create(void)
++{
++ return new hash_set<tree>;
++}
++
++static inline void pointer_set_destroy(tree_set *visited)
++{
++ delete visited;
++}
++#else
++typedef struct pointer_set_t gimple_set;
++typedef struct pointer_set_t tree_set;
++
++static inline tree_set *tree_pointer_set_create(void)
++{
++ return pointer_set_create();
++}
++#endif
++
+struct visited {
-+ struct pointer_set_t *stmts;
-+ struct pointer_set_t *my_stmts;
-+ struct pointer_set_t *skip_expr_casts;
-+ struct pointer_set_t *no_cast_check;
++ gimple_set *stmts;
++ gimple_set *my_stmts;
++ gimple_set *skip_expr_casts;
++ gimple_set *no_cast_check;
+};
+
+// size_overflow_plugin.c
@@ -128044,10 +129074,10 @@ index 0000000..37f8fc3
+ unsigned int num;
+ enum mark intentional_attr_decl;
+ enum mark intentional_attr_cur_fndecl;
-+ gimple intentional_mark_from_gimple;
++ gasm *intentional_mark_from_gimple;
+};
+
-+extern bool is_size_overflow_asm(const_gimple stmt);
++extern bool is_size_overflow_asm(const gasm *stmt);
+extern unsigned int get_function_num(const_tree node, const_tree orig_fndecl);
+extern unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl);
+extern bool is_missing_function(const_tree orig_fndecl, unsigned int num);
@@ -128062,8 +129092,8 @@ index 0000000..37f8fc3
+
+// intentional_overflow.c
+extern enum mark get_intentional_attr_type(const_tree node);
-+extern bool is_size_overflow_intentional_asm_yes(const_gimple stmt);
-+extern bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt);
++extern bool is_size_overflow_intentional_asm_yes(const gasm *stmt);
++extern bool is_size_overflow_intentional_asm_turn_off(const gasm *stmt);
+extern bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum);
+extern bool is_yes_intentional_attr(const_tree decl, unsigned int argnum);
+extern bool is_turn_off_intentional_attr(const_tree decl);
@@ -128071,12 +129101,12 @@ index 0000000..37f8fc3
+extern void check_intentional_attribute_ipa(struct interesting_node *cur_node);
+extern bool is_a_cast_and_const_overflow(const_tree no_const_rhs);
+extern bool is_const_plus_unsigned_signed_truncation(const_tree lhs);
-+extern bool is_a_constant_overflow(const_gimple stmt, const_tree rhs);
-+extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2);
++extern bool is_a_constant_overflow(const gassign *stmt, const_tree rhs);
++extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gassign *stmt, tree change_rhs, tree new_rhs2);
+extern tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs);
+extern bool is_a_neg_overflow(const_gimple stmt, const_tree rhs);
-+extern enum intentional_overflow_type add_mul_intentional_overflow(const_gimple def_stmt);
-+extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt);
++extern enum intentional_overflow_type add_mul_intentional_overflow(const gassign *def_stmt);
++extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gassign *stmt);
+
+
+// insert_size_overflow_check_ipa.c
@@ -128093,6 +129123,7 @@ index 0000000..37f8fc3
+// misc.c
+extern void set_current_function_decl(tree fndecl);
+extern void unset_current_function_decl(void);
++extern tree get_lhs(const_gimple stmt);
+extern gimple get_def_stmt(const_tree node);
+extern tree create_new_var(tree type);
+extern gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force);
@@ -128104,28 +129135,29 @@ index 0000000..37f8fc3
+// insert_size_overflow_check_core.c
+extern tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs);
+extern void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
-+extern tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
++extern tree dup_assign(struct visited *visited, gassign *oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
+extern tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before);
+
+
+// remove_unnecessary_dup.c
+extern struct opt_pass *make_remove_unnecessary_dup_pass(void);
-+extern void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type);
-+extern bool skip_expr_on_double_type(const_gimple stmt);
-+extern void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs);
++extern void insert_cast_expr(struct visited *visited, gassign *stmt, enum intentional_overflow_type type);
++extern bool skip_expr_on_double_type(const gassign *stmt);
++extern void create_up_and_down_cast(struct visited *visited, gassign *use_stmt, tree orig_type, tree rhs);
+
+#endif
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_debug.c b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
new file mode 100644
-index 0000000..4378111
+index 0000000..176c32f
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,123 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -128142,7 +129174,7 @@ index 0000000..4378111
+
+#include "gcc-common.h"
+
-+static unsigned int dump_functions(void)
++static unsigned int __unused dump_functions(void)
+{
+ struct cgraph_node *node;
+
@@ -128177,6 +129209,7 @@ index 0000000..4378111
+}
+
+#if BUILDING_GCC_VERSION >= 4009
++namespace {
+static const struct pass_data dump_pass_data = {
+#else
+static struct ipa_opt_pass_d dump_pass = {
@@ -128187,7 +129220,8 @@ index 0000000..4378111
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
-+#if BUILDING_GCC_VERSION >= 4009
++#if BUILDING_GCC_VERSION >= 5000
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -128220,23 +129254,27 @@ index 0000000..4378111
+};
+
+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
+class dump_pass : public ipa_opt_pass_d {
+public:
+ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return dump_functions(); }
++#else
+ unsigned int execute() { return dump_functions(); }
++#endif
+};
+}
-+#endif
+
-+struct opt_pass *make_dump_pass(void)
++opt_pass *make_dump_pass(void)
+{
-+#if BUILDING_GCC_VERSION >= 4009
+ return new dump_pass();
++}
+#else
++struct opt_pass *make_dump_pass(void)
++{
+ return &dump_pass.pass;
-+#endif
+}
++#endif
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
index 0000000..51560ee
@@ -134404,15 +135442,16 @@ index 0000000..560cd7b
+zpios_read_64734 zpios_read 3 64734 NULL
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
new file mode 100644
-index 0000000..95f7abd
+index 0000000..7e07890
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
-@@ -0,0 +1,259 @@
+@@ -0,0 +1,260 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -134440,7 +135479,7 @@ index 0000000..95f7abd
+tree size_overflow_type_TI;
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140725",
++ .version = "20140725_01",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -134494,7 +135533,7 @@ index 0000000..95f7abd
+ return NULL_TREE;
+ }
+
-+ if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
++ if (tree_to_shwi(TREE_VALUE(args)) != 0)
+ return NULL_TREE;
+
+ for (; args; args = TREE_CHAIN(args)) {
@@ -134669,15 +135708,16 @@ index 0000000..95f7abd
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
new file mode 100644
-index 0000000..0888f6c
+index 0000000..2a693fe
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
-@@ -0,0 +1,364 @@
+@@ -0,0 +1,355 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -134905,43 +135945,33 @@ index 0000000..0888f6c
+ return CANNOT_FIND_ARG;
+}
+
-+static const char *get_asm_string(const_gimple stmt)
-+{
-+ if (!stmt)
-+ return NULL;
-+ if (gimple_code(stmt) != GIMPLE_ASM)
-+ return NULL;
-+
-+ return gimple_asm_string(stmt);
-+}
-+
-+bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
++bool is_size_overflow_intentional_asm_turn_off(const gasm *stmt)
+{
+ const char *str;
+
-+ str = get_asm_string(stmt);
-+ if (!str)
++ if (!stmt)
+ return false;
++ str = gimple_asm_string(stmt);
+ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
+}
+
-+bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
++bool is_size_overflow_intentional_asm_yes(const gasm *stmt)
+{
+ const char *str;
+
-+ str = get_asm_string(stmt);
-+ if (!str)
++ if (!stmt)
+ return false;
++ str = gimple_asm_string(stmt);
+ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
+}
+
-+bool is_size_overflow_asm(const_gimple stmt)
++bool is_size_overflow_asm(const gasm *stmt)
+{
+ const char *str;
+
-+ str = get_asm_string(stmt);
-+ if (!str)
++ if (!stmt)
+ return false;
++ str = gimple_asm_string(stmt);
+ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
+}
+
diff --git a/3.2.69/0000_README b/3.2.69/0000_README
index 0df9a58..9b79be0 100644
--- a/3.2.69/0000_README
+++ b/3.2.69/0000_README
@@ -194,7 +194,7 @@ Patch: 1068_linux-3.2.69.patch
From: http://www.kernel.org
Desc: Linux 3.2.69
-Patch: 4420_grsecurity-3.1-3.2.69-201507111207.patch
+Patch: 4420_grsecurity-3.1-3.2.69-201507251415.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.69/4420_grsecurity-3.1-3.2.69-201507111207.patch b/3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch
index d2caf34..11686d8 100644
--- a/3.2.69/4420_grsecurity-3.1-3.2.69-201507111207.patch
+++ b/3.2.69/4420_grsecurity-3.1-3.2.69-201507251415.patch
@@ -281,6 +281,39 @@ index 88fd7f5..b318a78 100644
==============================================================
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 2a68089..b3300e1 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -36,6 +36,7 @@ show up in /proc/sys/kernel:
+ - kptr_restrict
+ - kstack_depth_to_print [ X86 only ]
+ - l2cr [ PPC only ]
++- modify_ldt [ X86 only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+ - modules_disabled
+ - msgmax
+@@ -318,6 +319,20 @@ This flag controls the L2 cache of G3 processor boards. If
+
+ ==============================================================
+
++modify_ldt: (X86 only)
++
++Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
++(Local Descriptor Table) may be needed to run a 16-bit or segmented code
++such as Dosemu or Wine. This is done via a system call which is not needed
++to run portable applications, and which can sometimes be abused to exploit
++some weaknesses of the architecture, opening new vulnerabilities.
++
++This sysctl allows one to increase the system's security by disabling the
++system call, or to restore compatibility with specific applications when it
++was already disabled.
++
++==============================================================
++
+ modules_disabled:
+
+ A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
index 8071888..b024b7b 100644
--- a/Makefile
@@ -10454,7 +10487,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 28a1bca..6eebf04 100644
+index 28a1bca..0443883 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -75,6 +75,7 @@ config X86
@@ -10554,6 +10587,29 @@ index 28a1bca..6eebf04 100644
---help---
Map the 32-bit VDSO to the predictable old-style address too.
+@@ -1720,6 +1728,22 @@ config CMDLINE_OVERRIDE
+ This is used to work around broken boot loaders. This should
+ be set to 'N' under normal conditions.
+
++config DEFAULT_MODIFY_LDT_SYSCALL
++ bool "Allow userspace to modify the LDT by default"
++ default y
++
++ ---help---
++ Modifying the LDT (Local Descriptor Table) may be needed to run a
++ 16-bit or segmented code such as Dosemu or Wine. This is done via
++ a system call which is not needed to run portable applications,
++ and which can sometimes be abused to exploit some weaknesses of
++ the architecture, opening new vulnerabilities.
++
++ For this reason this option allows one to enable or disable the
++ feature at runtime. It is recommended to say 'N' here to leave
++ the system protected, and to enable it at runtime only if needed
++ by setting the sys.kernel.modify_ldt sysctl.
++
+ endmenu
+
+ config ARCH_ENABLE_MEMORY_HOTPLUG
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index e3ca7e0..b30b28a 100644
--- a/arch/x86/Kconfig.cpu
@@ -22436,10 +22492,33 @@ index 4b6701e..1a3dcdb 100644
};
#endif
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
-index 0a8e65e..6e8de34 100644
+index 0a8e65e..563640b 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
-@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+@@ -11,6 +11,7 @@
+ #include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/mm.h>
++#include <linux/ratelimit.h>
+ #include <linux/smp.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+@@ -21,6 +22,14 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+
++#ifdef CONFIG_GRKERNSEC
++int sysctl_modify_ldt __read_only = 0;
++#elif defined(CONFIG_DEFAULT_MODIFY_LDT_SYSCALL)
++int sysctl_modify_ldt __read_only = 1;
++#else
++int sysctl_modify_ldt __read_only = 0;
++#endif
++
+ #ifdef CONFIG_SMP
+ static void flush_ldt(void *current_mm)
+ {
+@@ -67,13 +76,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
if (reload) {
#ifdef CONFIG_SMP
preempt_disable();
@@ -22455,7 +22534,7 @@ index 0a8e65e..6e8de34 100644
#endif
}
if (oldsize) {
-@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+@@ -95,7 +104,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
return err;
for (i = 0; i < old->size; i++)
@@ -22464,7 +22543,7 @@ index 0a8e65e..6e8de34 100644
return 0;
}
-@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+@@ -116,6 +125,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
retval = copy_ldt(&mm->context, &old_mm->context);
mutex_unlock(&old_mm->context.lock);
}
@@ -22489,7 +22568,7 @@ index 0a8e65e..6e8de34 100644
return retval;
}
-@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+@@ -230,6 +257,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
}
}
@@ -22503,6 +22582,21 @@ index 0a8e65e..6e8de34 100644
if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
error = -EINVAL;
goto out_unlock;
+@@ -255,6 +289,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+ {
+ int ret = -ENOSYS;
+
++ if (!sysctl_modify_ldt) {
++ printk_ratelimited(KERN_INFO
++ "Denied a call to modify_ldt() from %s[%d] (uid: %d)."
++ " Adjust sysctl if this was not an exploit attempt.\n",
++ current->comm, task_pid_nr(current), current_uid());
++ return ret;
++ }
++
+ switch (func) {
+ case 0:
+ ret = read_ldt(ptr, bytecount);
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index a3fa43b..8966f4c 100644
--- a/arch/x86/kernel/machine_kexec_32.c
@@ -46312,7 +46406,7 @@ index fed39de..8adf3152 100644
};
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
-index 7300447..fa23d39 100644
+index 7300447..cb83d3e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -351,7 +351,7 @@ static void macvtap_setup(struct net_device *dev)
@@ -46342,6 +46436,14 @@ index 7300447..fa23d39 100644
.notifier_call = macvtap_device_event,
};
+@@ -1151,6 +1151,7 @@ static void macvtap_exit(void)
+ class_unregister(macvtap_class);
+ cdev_del(&macvtap_cdev);
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
++ idr_destroy(&minor_idr);
+ }
+ module_exit(macvtap_exit);
+
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 83a5a5a..9a9d0ae 100644
--- a/drivers/net/phy/phy_device.c
@@ -46804,6 +46906,51 @@ index 3d21742..b8e03e7 100644
// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 28ceef2..655b059 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1140,7 +1140,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ static const u32 rxprod_reg[2] = {
+ VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
+ };
+- u32 num_rxd = 0;
++ u32 num_pkts = 0;
+ bool skip_page_frags = false;
+ struct Vmxnet3_RxCompDesc *rcd;
+ struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
+@@ -1158,13 +1158,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ struct Vmxnet3_RxDesc *rxd;
+ u32 idx, ring_idx;
+ struct vmxnet3_cmd_ring *ring = NULL;
+- if (num_rxd >= quota) {
++ if (num_pkts >= quota) {
+ /* we may stop even before we see the EOP desc of
+ * the current pkt
+ */
+ break;
+ }
+- num_rxd++;
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
+ idx = rcd->rxdIdx;
+ ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+@@ -1288,6 +1287,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ napi_gro_receive(&rq->napi, skb);
+
+ ctx->skb = NULL;
++ num_pkts++;
+ }
+
+ rcd_done:
+@@ -1319,7 +1319,7 @@ rcd_done:
+ &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
+ }
+
+- return num_rxd;
++ return num_pkts;
+ }
+
+
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e662cbc..8d4a102 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -56737,10 +56884,20 @@ index 2524e4c..2962cc6a 100644
if (retval > 0)
retval = 0;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
-index 879ed88..bc03a01 100644
+index 879ed88..dbaf762 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
-@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -527,8 +527,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
+ unlock_new_inode(inode);
+ return inode;
+ error:
+- unlock_new_inode(inode);
+- iput(inode);
++ iget_failed(inode);
+ return ERR_PTR(retval);
+
+ }
+@@ -1286,7 +1285,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
void
v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
{
@@ -56749,6 +56906,20 @@ index 879ed88..bc03a01 100644
P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
IS_ERR(s) ? "<error>" : s);
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 30d4fa8..dbbc83f 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -169,8 +169,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ unlock_new_inode(inode);
+ return inode;
+ error:
+- unlock_new_inode(inode);
+- iput(inode);
++ iget_failed(inode);
+ return ERR_PTR(retval);
+
+ }
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index c70251d..fe305fd 100644
--- a/fs/9p/vfs_super.c
@@ -59166,7 +59337,7 @@ index 739fb59..5385976 100644
static int __init init_cramfs_fs(void)
{
diff --git a/fs/dcache.c b/fs/dcache.c
-index 8bc98af..2cc0298 100644
+index 8bc98af..68601d9 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -103,11 +103,11 @@ static unsigned int d_hash_shift __read_mostly;
@@ -59185,7 +59356,31 @@ index 8bc98af..2cc0298 100644
return dentry_hashtable + (hash & D_HASHMASK);
}
-@@ -1016,13 +1016,13 @@ ascend:
+@@ -478,15 +478,18 @@ repeat:
+ return;
+ }
+
+- if (dentry->d_flags & DCACHE_OP_DELETE) {
++ /* Unreachable? Get rid of it */
++ if (unlikely(d_unhashed(dentry)))
++ goto kill_it;
++
++ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
++ goto kill_it;
++
++ if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
+ if (dentry->d_op->d_delete(dentry))
+ goto kill_it;
+ }
+
+- /* Unreachable? Get rid of it */
+- if (d_unhashed(dentry))
+- goto kill_it;
+-
+ /*
+ * If this dentry needs lookup, don't set the referenced flag so that it
+ * is more likely to be cleaned up by the dcache shrinker in case of
+@@ -1016,13 +1019,13 @@ ascend:
/* might go back up the wrong parent if we have had a rename */
if (!locked && read_seqretry(&rename_lock, seq))
goto rename_retry;
@@ -59203,7 +59398,7 @@ index 8bc98af..2cc0298 100644
rcu_read_unlock();
goto resume;
}
-@@ -1235,6 +1235,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+@@ -1235,6 +1238,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dentry->d_sb = sb;
dentry->d_op = NULL;
dentry->d_fsdata = NULL;
@@ -59213,7 +59408,7 @@ index 8bc98af..2cc0298 100644
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
-@@ -3082,7 +3085,8 @@ void __init vfs_caches_init(unsigned long mempages)
+@@ -3082,7 +3088,8 @@ void __init vfs_caches_init(unsigned long mempages)
mempages -= reserve;
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
@@ -73953,7 +74148,7 @@ index 0000000..25f54ef
+};
diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
new file mode 100644
-index 0000000..62916b2
+index 0000000..edcb09b
--- /dev/null
+++ b/grsecurity/gracl_policy.c
@@ -0,0 +1,1780 @@
@@ -74411,7 +74606,7 @@ index 0000000..62916b2
+ get_fs_root(reaper->fs, &gr_real_root);
+
+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
-+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
++ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", gr_get_dev_from_dentry(gr_real_root.dentry), gr_get_ino_from_dentry(gr_real_root.dentry));
+#endif
+
+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
@@ -77353,7 +77548,7 @@ index 0000000..8ca18bf
+}
diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
new file mode 100644
-index 0000000..b09101d
+index 0000000..68121e2
--- /dev/null
+++ b/grsecurity/grsec_init.c
@@ -0,0 +1,290 @@
@@ -77366,61 +77561,61 @@ index 0000000..b09101d
+#include <linux/percpu.h>
+#include <linux/module.h>
+
-+int grsec_enable_ptrace_readexec;
-+int grsec_enable_setxid;
-+int grsec_enable_symlinkown;
-+int grsec_symlinkown_gid;
-+int grsec_enable_brute;
-+int grsec_enable_link;
-+int grsec_enable_dmesg;
-+int grsec_enable_harden_ptrace;
-+int grsec_enable_harden_ipc;
-+int grsec_enable_fifo;
-+int grsec_enable_execlog;
-+int grsec_enable_signal;
-+int grsec_enable_forkfail;
-+int grsec_enable_audit_ptrace;
-+int grsec_enable_time;
-+int grsec_enable_group;
-+int grsec_audit_gid;
-+int grsec_enable_chdir;
-+int grsec_enable_mount;
-+int grsec_enable_rofs;
-+int grsec_deny_new_usb;
-+int grsec_enable_chroot_findtask;
-+int grsec_enable_chroot_mount;
-+int grsec_enable_chroot_shmat;
-+int grsec_enable_chroot_fchdir;
-+int grsec_enable_chroot_double;
-+int grsec_enable_chroot_pivot;
-+int grsec_enable_chroot_chdir;
-+int grsec_enable_chroot_chmod;
-+int grsec_enable_chroot_mknod;
-+int grsec_enable_chroot_nice;
-+int grsec_enable_chroot_execlog;
-+int grsec_enable_chroot_caps;
-+int grsec_enable_chroot_rename;
-+int grsec_enable_chroot_sysctl;
-+int grsec_enable_chroot_unix;
-+int grsec_enable_tpe;
-+int grsec_tpe_gid;
-+int grsec_enable_blackhole;
++int grsec_enable_ptrace_readexec __read_only;
++int grsec_enable_setxid __read_only;
++int grsec_enable_symlinkown __read_only;
++int grsec_symlinkown_gid __read_only;
++int grsec_enable_brute __read_only;
++int grsec_enable_link __read_only;
++int grsec_enable_dmesg __read_only;
++int grsec_enable_harden_ptrace __read_only;
++int grsec_enable_harden_ipc __read_only;
++int grsec_enable_fifo __read_only;
++int grsec_enable_execlog __read_only;
++int grsec_enable_signal __read_only;
++int grsec_enable_forkfail __read_only;
++int grsec_enable_audit_ptrace __read_only;
++int grsec_enable_time __read_only;
++int grsec_enable_group __read_only;
++int grsec_audit_gid __read_only;
++int grsec_enable_chdir __read_only;
++int grsec_enable_mount __read_only;
++int grsec_enable_rofs __read_only;
++int grsec_deny_new_usb __read_only;
++int grsec_enable_chroot_findtask __read_only;
++int grsec_enable_chroot_mount __read_only;
++int grsec_enable_chroot_shmat __read_only;
++int grsec_enable_chroot_fchdir __read_only;
++int grsec_enable_chroot_double __read_only;
++int grsec_enable_chroot_pivot __read_only;
++int grsec_enable_chroot_chdir __read_only;
++int grsec_enable_chroot_chmod __read_only;
++int grsec_enable_chroot_mknod __read_only;
++int grsec_enable_chroot_nice __read_only;
++int grsec_enable_chroot_execlog __read_only;
++int grsec_enable_chroot_caps __read_only;
++int grsec_enable_chroot_rename __read_only;
++int grsec_enable_chroot_sysctl __read_only;
++int grsec_enable_chroot_unix __read_only;
++int grsec_enable_tpe __read_only;
++int grsec_tpe_gid __read_only;
++int grsec_enable_blackhole __read_only;
+#ifdef CONFIG_IPV6_MODULE
+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
+#endif
-+int grsec_lastack_retries;
-+int grsec_enable_tpe_all;
-+int grsec_enable_tpe_invert;
-+int grsec_enable_socket_all;
-+int grsec_socket_all_gid;
-+int grsec_enable_socket_client;
-+int grsec_socket_client_gid;
-+int grsec_enable_socket_server;
-+int grsec_socket_server_gid;
-+int grsec_resource_logging;
-+int grsec_disable_privio;
-+int grsec_enable_log_rwxmaps;
-+int grsec_lock;
++int grsec_lastack_retries __read_only;
++int grsec_enable_tpe_all __read_only;
++int grsec_enable_tpe_invert __read_only;
++int grsec_enable_socket_all __read_only;
++int grsec_socket_all_gid __read_only;
++int grsec_enable_socket_client __read_only;
++int grsec_socket_client_gid __read_only;
++int grsec_enable_socket_server __read_only;
++int grsec_socket_server_gid __read_only;
++int grsec_resource_logging __read_only;
++int grsec_disable_privio __read_only;
++int grsec_enable_log_rwxmaps __read_only;
++int grsec_lock __read_only;
+
+DEFINE_SPINLOCK(grsec_alert_lock);
+unsigned long grsec_alert_wtime = 0;
@@ -78849,7 +79044,7 @@ index 0000000..a523bd2
+}
diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
new file mode 100644
-index 0000000..a51b175
+index 0000000..a3b8942
--- /dev/null
+++ b/grsecurity/grsec_sysctl.c
@@ -0,0 +1,486 @@
@@ -78886,7 +79081,7 @@ index 0000000..a51b175
+ .data = &grsec_disable_privio,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#endif
@@ -78896,7 +79091,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_link,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
@@ -78905,14 +79100,14 @@ index 0000000..a51b175
+ .data = &grsec_enable_symlinkown,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "symlinkown_gid",
+ .data = &grsec_symlinkown_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_BRUTE
@@ -78921,7 +79116,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_brute,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_FIFO
@@ -78930,7 +79125,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_fifo,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
@@ -78939,7 +79134,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_ptrace_readexec,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SETXID
@@ -78948,7 +79143,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_setxid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
@@ -78957,14 +79152,14 @@ index 0000000..a51b175
+ .data = &grsec_enable_blackhole,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "lastack_retries",
+ .data = &grsec_lastack_retries,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_EXECLOG
@@ -78973,7 +79168,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_execlog,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
@@ -78982,7 +79177,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_log_rwxmaps,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SIGNAL
@@ -78991,7 +79186,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_signal,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_FORKFAIL
@@ -79000,7 +79195,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_forkfail,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TIME
@@ -79009,7 +79204,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_time,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
@@ -79018,7 +79213,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_shmat,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
@@ -79027,7 +79222,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_unix,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
@@ -79036,7 +79231,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_mount,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
@@ -79045,7 +79240,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_fchdir,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
@@ -79054,7 +79249,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_double,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
@@ -79063,7 +79258,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_pivot,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
@@ -79072,7 +79267,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_chdir,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
@@ -79081,7 +79276,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_chmod,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
@@ -79090,7 +79285,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_mknod,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
@@ -79099,7 +79294,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_nice,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
@@ -79108,7 +79303,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_execlog,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
@@ -79117,7 +79312,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_caps,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
@@ -79126,7 +79321,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_rename,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
@@ -79135,7 +79330,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_sysctl,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE
@@ -79144,14 +79339,14 @@ index 0000000..a51b175
+ .data = &grsec_enable_tpe,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "tpe_gid",
+ .data = &grsec_tpe_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
@@ -79160,7 +79355,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_tpe_invert,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_TPE_ALL
@@ -79169,7 +79364,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_tpe_all,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
@@ -79178,14 +79373,14 @@ index 0000000..a51b175
+ .data = &grsec_enable_socket_all,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "socket_all_gid",
+ .data = &grsec_socket_all_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
@@ -79194,14 +79389,14 @@ index 0000000..a51b175
+ .data = &grsec_enable_socket_client,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "socket_client_gid",
+ .data = &grsec_socket_client_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
@@ -79210,14 +79405,14 @@ index 0000000..a51b175
+ .data = &grsec_enable_socket_server,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "socket_server_gid",
+ .data = &grsec_socket_server_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
@@ -79226,14 +79421,14 @@ index 0000000..a51b175
+ .data = &grsec_enable_group,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+ {
+ .procname = "audit_gid",
+ .data = &grsec_audit_gid,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
@@ -79242,7 +79437,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chdir,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
@@ -79251,7 +79446,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_mount,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_DMESG
@@ -79260,7 +79455,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_dmesg,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
@@ -79269,7 +79464,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_chroot_findtask,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_RESLOG
@@ -79278,7 +79473,7 @@ index 0000000..a51b175
+ .data = &grsec_resource_logging,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
@@ -79287,7 +79482,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_audit_ptrace,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
@@ -79296,7 +79491,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_harden_ptrace,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
@@ -79305,7 +79500,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_harden_ipc,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+ {
@@ -79313,7 +79508,7 @@ index 0000000..a51b175
+ .data = &grsec_lock,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+#ifdef CONFIG_GRKERNSEC_ROFS
@@ -79322,7 +79517,7 @@ index 0000000..a51b175
+ .data = &grsec_enable_rofs,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec_minmax,
++ .proc_handler = &proc_dointvec_minmax_secure,
+ .extra1 = &one,
+ .extra2 = &one,
+ },
@@ -79333,7 +79528,7 @@ index 0000000..a51b175
+ .data = &grsec_deny_new_usb,
+ .maxlen = sizeof(int),
+ .mode = 0600,
-+ .proc_handler = &proc_dointvec,
++ .proc_handler = &proc_dointvec_secure,
+ },
+#endif
+ { }
@@ -80750,6 +80945,19 @@ index 04ffb2e..6799180 100644
extern struct cleancache_ops
cleancache_register_ops(struct cleancache_ops *ops);
+diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
+index d9a4fd0..13edc9f 100644
+--- a/include/linux/clkdev.h
++++ b/include/linux/clkdev.h
+@@ -32,7 +32,7 @@ struct clk_lookup {
+ }
+
+ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
+- const char *dev_fmt, ...);
++ const char *dev_fmt, ...) __printf(3, 4);
+
+ void clkdev_add(struct clk_lookup *cl);
+ void clkdev_drop(struct clk_lookup *cl);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 081147d..da89543 100644
--- a/include/linux/clocksource.h
@@ -80764,7 +80972,7 @@ index 081147d..da89543 100644
extern void
diff --git a/include/linux/compat.h b/include/linux/compat.h
-index d42bd48..a20850d 100644
+index d42bd48..f651bd9 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -240,10 +240,10 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
@@ -80780,6 +80988,15 @@ index d42bd48..a20850d 100644
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+@@ -320,7 +320,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+
+ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
+
+-extern int compat_printk(const char *fmt, ...);
++extern __printf(1, 2) int compat_printk(const char *fmt, ...);
+ extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
+
+ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
@@ -334,7 +334,7 @@ extern int compat_ptrace_request(struct task_struct *child,
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
@@ -80833,10 +81050,10 @@ index 59a7e4c..8feb590 100644
#if __GNUC_MINOR__ > 0
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
-index cdd1cc2..9c1ee22 100644
+index cdd1cc2..d062745 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
-@@ -28,6 +28,31 @@
+@@ -28,6 +28,30 @@
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
@@ -80856,7 +81073,6 @@ index cdd1cc2..9c1ee22 100644
+#endif
+
+#ifdef SIZE_OVERFLOW_PLUGIN
-+#error not yet
+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
+#endif
@@ -80868,7 +81084,7 @@ index cdd1cc2..9c1ee22 100644
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
-@@ -53,7 +78,6 @@
+@@ -53,7 +77,6 @@
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
*
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
@@ -81066,19 +81282,20 @@ index 51494e6..340575ab 100644
extern bool completion_done(struct completion *x);
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
-index 3081c58..5a0b545 100644
+index 3081c58..80789a0 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
-@@ -64,7 +64,7 @@ struct config_item {
+@@ -64,7 +64,8 @@ struct config_item {
struct dentry *ci_dentry;
};
-extern int config_item_set_name(struct config_item *, const char *, ...);
-+extern __printf(2, 3) int config_item_set_name(struct config_item *, const char *, ...);
++extern __printf(2, 3)
++int config_item_set_name(struct config_item *, const char *, ...);
static inline char *config_item_name(struct config_item * item)
{
-@@ -125,7 +125,7 @@ struct configfs_attribute {
+@@ -125,7 +126,7 @@ struct configfs_attribute {
const char *ca_name;
struct module *ca_owner;
mode_t ca_mode;
@@ -81279,7 +81496,7 @@ index 8acfe31..6ffccd63 100644
return c | 0x20;
}
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index 99374de..ac23d39 100644
+index 99374de..01feff6 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -132,6 +132,9 @@ struct dentry {
@@ -81301,6 +81518,16 @@ index 99374de..ac23d39 100644
/*
* dentry->d_lock spinlock nesting subclasses:
+@@ -340,7 +343,8 @@ extern int d_validate(struct dentry *, struct dentry *);
+ /*
+ * helper function for dentry_operations.d_dname() members
+ */
+-extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
++extern __printf(4, 5)
++char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+
+ extern char *__d_path(const struct path *, const struct path *, char *, int);
+ extern char *d_absolute_path(const struct path *, char *, int);
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 7925bf0..d5143d2 100644
--- a/include/linux/decompress/mm.h
@@ -81315,7 +81542,7 @@ index 7925bf0..d5143d2 100644
#define large_malloc(a) vmalloc(a)
diff --git a/include/linux/device.h b/include/linux/device.h
-index a31c5d0..ff3d03b 100644
+index a31c5d0..e9e8aac 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -427,7 +427,7 @@ struct device_type {
@@ -81335,6 +81562,23 @@ index a31c5d0..ff3d03b 100644
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+@@ -757,12 +758,10 @@ extern int __must_check device_reprobe(struct device *dev);
+ /*
+ * Easy functions for dynamically creating devices on the fly
+ */
+-extern struct device *device_create_vargs(struct class *cls,
+- struct device *parent,
+- dev_t devt,
+- void *drvdata,
+- const char *fmt,
+- va_list vargs);
++extern __printf(5, 0)
++struct device *device_create_vargs(struct class *cls, struct device *parent,
++ dev_t devt, void *drvdata,
++ const char *fmt, va_list vargs);
+ extern __printf(5, 6)
+ struct device *device_create(struct class *cls, struct device *parent,
+ dev_t devt, void *drvdata,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e13117c..e9fc938 100644
--- a/include/linux/dma-mapping.h
@@ -83441,10 +83685,42 @@ index 3875719..4663bc3 100644
/* This macro allows us to keep printk typechecking */
static __printf(1, 2)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
-index dcf6a8b..e1f7aa5 100644
+index dcf6a8b..a182533 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -698,24 +698,30 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
+@@ -326,7 +326,8 @@ extern __printf(3, 0)
+ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+ extern __printf(2, 3)
+ char *kasprintf(gfp_t gfp, const char *fmt, ...);
+-extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
++extern __printf(2, 0)
++char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+
+ extern int sscanf(const char *, const char *, ...)
+ __attribute__ ((format (scanf, 2, 3)));
+@@ -514,10 +515,10 @@ do { \
+ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
+ } while (0)
+
+-extern int
++extern __printf(2, 0) int
+ __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+-extern int
++extern __printf(2, 0) int
+ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
+@@ -534,7 +535,7 @@ trace_printk(const char *fmt, ...)
+ {
+ return 0;
+ }
+-static inline int
++static __printf(1, 0) inline int
+ ftrace_vprintk(const char *fmt, va_list ap)
+ {
+ return 0;
+@@ -698,24 +699,30 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @condition: the condition which the compiler should know is false.
*
* If you have some code which relies on certain constants being equal, or
@@ -83560,10 +83836,22 @@ index f8d4b27..8560882 100644
char **envp;
enum umh_wait wait;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
-index 445f978..24e427c 100644
+index 445f978..6b3fc2c 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
-@@ -111,7 +111,7 @@ struct kobj_type {
+@@ -74,8 +74,9 @@ struct kobject {
+
+ extern __printf(2, 3)
+ int kobject_set_name(struct kobject *kobj, const char *name, ...);
+-extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+- va_list vargs);
++extern __printf(2, 0)
++int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
++ va_list vargs);
+
+ static inline const char *kobject_name(const struct kobject *kobj)
+ {
+@@ -111,7 +112,7 @@ struct kobj_type {
struct attribute **default_attrs;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
@@ -83572,7 +83860,7 @@ index 445f978..24e427c 100644
struct kobj_uevent_env {
char *envp[UEVENT_NUM_ENVP];
-@@ -134,6 +134,7 @@ struct kobj_attribute {
+@@ -134,6 +135,7 @@ struct kobj_attribute {
ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count);
};
@@ -83580,7 +83868,7 @@ index 445f978..24e427c 100644
extern const struct sysfs_ops kobj_sysfs_ops;
-@@ -161,7 +162,7 @@ struct kset {
+@@ -161,7 +163,7 @@ struct kset {
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
@@ -84064,7 +84352,7 @@ index 174a844..11483c2 100644
/*
* Standard errno values are used for errors, but some have specific
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
-index c5d5278..f0b68c8 100644
+index c5d5278..85cd5ce 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
@@ -84085,6 +84373,14 @@ index c5d5278..f0b68c8 100644
{
}
+@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
+ extern void disable_mmiotrace(void);
+ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
+ extern void mmio_trace_mapping(struct mmiotrace_map *map);
+-extern int mmio_trace_printk(const char *fmt, va_list args);
++extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
+
+ #endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index ee2baf0..e24a58c 100644
--- a/include/linux/mmu_notifier.h
@@ -86448,7 +86744,7 @@ index 27b3b0b..e093dd9 100644
extern void register_syscore_ops(struct syscore_ops *ops);
extern void unregister_syscore_ops(struct syscore_ops *ops);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
-index 703cfa33..305427e 100644
+index 703cfa33..98e3375 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -155,8 +155,6 @@ enum
@@ -86460,7 +86756,7 @@ index 703cfa33..305427e 100644
/* CTL_VM names: */
enum
{
-@@ -961,13 +959,13 @@ extern void sysctl_head_finish(struct ctl_table_header *prev);
+@@ -961,17 +959,21 @@ extern void sysctl_head_finish(struct ctl_table_header *prev);
extern int sysctl_perm(struct ctl_table_root *root,
struct ctl_table *table, int op);
@@ -86475,8 +86771,16 @@ index 703cfa33..305427e 100644
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
++extern int proc_dointvec_secure(struct ctl_table *, int,
++ void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
-@@ -1045,7 +1043,9 @@ struct ctl_table
+ void __user *, size_t *, loff_t *);
++extern int proc_dointvec_minmax_secure(struct ctl_table *, int,
++ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_jiffies(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+ extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
+@@ -1045,7 +1047,9 @@ struct ctl_table
struct ctl_table_poll *poll;
void *extra1;
void *extra2;
@@ -90107,7 +90411,7 @@ index f56af55..657c675 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 4277095..836fd7d 100644
+index 4277095..c1440e1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -146,8 +146,15 @@ static struct srcu_struct pmus_srcu;
@@ -90118,11 +90422,11 @@ index 4277095..836fd7d 100644
*/
-int sysctl_perf_event_paranoid __read_mostly = 1;
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
-+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
++int sysctl_perf_event_legitimately_concerned __read_only = 3;
+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
-+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
++int sysctl_perf_event_legitimately_concerned __read_only = 2;
+#else
-+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
++int sysctl_perf_event_legitimately_concerned __read_only = 1;
+#endif
/* Minimum for 512 kiB + 1 user control page */
@@ -91612,7 +91916,7 @@ index 91c32a0..7b88d63 100644
seq_printf(m, "%40s %14lu %29s %pS\n",
name, stats->contending_point[i],
diff --git a/kernel/module.c b/kernel/module.c
-index 95ecd9f..dfa3a9b 100644
+index 95ecd9f..db549a6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -58,6 +58,7 @@
@@ -91623,6 +91927,15 @@ index 95ecd9f..dfa3a9b 100644
#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
+@@ -110,7 +111,7 @@ struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+
+
+ /* Block module loading/unloading? */
+-int modules_disabled = 0;
++int modules_disabled __read_only = 0;
+
+ /* Waiting for a module to finish initializing? */
+ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
/* Bounds of module allocation, for speeding __module_address.
@@ -92941,6 +93254,2924 @@ index c073f43..ced569b 100644
if (syslog_action_restricted(type)) {
if (capable(CAP_SYSLOG))
return 0;
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+new file mode 100644
+index 0000000..ba98f34
+--- /dev/null
++++ b/kernel/printk/printk.c
+@@ -0,0 +1,2912 @@
++/*
++ * linux/kernel/printk.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Modified to make sys_syslog() more flexible: added commands to
++ * return the last 4k of kernel messages, regardless of whether
++ * they've been read or not. Added option to suppress kernel printk's
++ * to the console. Added hook for sending the console messages
++ * elsewhere, in preparation for a serial line console (someday).
++ * Ted Ts'o, 2/11/93.
++ * Modified for sysctl support, 1/8/97, Chris Horn.
++ * Fixed SMP synchronization, 08/08/99, Manfred Spraul
++ * manfred@colorfullife.com
++ * Rewrote bits to get rid of console_lock
++ * 01Mar01 Andrew Morton
++ */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/tty_driver.h>
++#include <linux/console.h>
++#include <linux/init.h>
++#include <linux/jiffies.h>
++#include <linux/nmi.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/interrupt.h> /* For in_interrupt() */
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/security.h>
++#include <linux/bootmem.h>
++#include <linux/memblock.h>
++#include <linux/aio.h>
++#include <linux/syscalls.h>
++#include <linux/kexec.h>
++#include <linux/kdb.h>
++#include <linux/ratelimit.h>
++#include <linux/kmsg_dump.h>
++#include <linux/syslog.h>
++#include <linux/cpu.h>
++#include <linux/notifier.h>
++#include <linux/rculist.h>
++#include <linux/poll.h>
++#include <linux/irq_work.h>
++#include <linux/utsname.h>
++
++#include <asm/uaccess.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/printk.h>
++
++#include "console_cmdline.h"
++#include "braille.h"
++
++/* printk's without a loglevel use this.. */
++#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
++
++/* We show everything that is MORE important than this.. */
++#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
++#define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
++
++int console_printk[4] = {
++ DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */
++ DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */
++ MINIMUM_CONSOLE_LOGLEVEL, /* minimum_console_loglevel */
++ DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
++};
++
++/*
++ * Low level drivers may need that to know if they can schedule in
++ * their unblank() callback or not. So let's export it.
++ */
++int oops_in_progress;
++EXPORT_SYMBOL(oops_in_progress);
++
++/*
++ * console_sem protects the console_drivers list, and also
++ * provides serialisation for access to the entire console
++ * driver system.
++ */
++static DEFINE_SEMAPHORE(console_sem);
++struct console *console_drivers;
++EXPORT_SYMBOL_GPL(console_drivers);
++
++#ifdef CONFIG_LOCKDEP
++static struct lockdep_map console_lock_dep_map = {
++ .name = "console_lock"
++};
++#endif
++
++/*
++ * This is used for debugging the mess that is the VT code by
++ * keeping track if we have the console semaphore held. It's
++ * definitely not the perfect debug tool (we don't know if _WE_
++ * hold it are racing, but it helps tracking those weird code
++ * path in the console code where we end up in places I want
++ * locked without the console sempahore held
++ */
++static int console_locked, console_suspended;
++
++/*
++ * If exclusive_console is non-NULL then only this console is to be printed to.
++ */
++static struct console *exclusive_console;
++
++/*
++ * Array of consoles built from command line options (console=)
++ */
++
++#define MAX_CMDLINECONSOLES 8
++
++static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
++
++static int selected_console = -1;
++static int preferred_console = -1;
++int console_set_on_cmdline;
++EXPORT_SYMBOL(console_set_on_cmdline);
++
++/* Flag: console code may call schedule() */
++static int console_may_schedule;
++
++/*
++ * The printk log buffer consists of a chain of concatenated variable
++ * length records. Every record starts with a record header, containing
++ * the overall length of the record.
++ *
++ * The heads to the first and last entry in the buffer, as well as the
++ * sequence numbers of these both entries are maintained when messages
++ * are stored..
++ *
++ * If the heads indicate available messages, the length in the header
++ * tells the start next message. A length == 0 for the next message
++ * indicates a wrap-around to the beginning of the buffer.
++ *
++ * Every record carries the monotonic timestamp in microseconds, as well as
++ * the standard userspace syslog level and syslog facility. The usual
++ * kernel messages use LOG_KERN; userspace-injected messages always carry
++ * a matching syslog facility, by default LOG_USER. The origin of every
++ * message can be reliably determined that way.
++ *
++ * The human readable log message directly follows the message header. The
++ * length of the message text is stored in the header, the stored message
++ * is not terminated.
++ *
++ * Optionally, a message can carry a dictionary of properties (key/value pairs),
++ * to provide userspace with a machine-readable message context.
++ *
++ * Examples for well-defined, commonly used property names are:
++ * DEVICE=b12:8 device identifier
++ * b12:8 block dev_t
++ * c127:3 char dev_t
++ * n8 netdev ifindex
++ * +sound:card0 subsystem:devname
++ * SUBSYSTEM=pci driver-core subsystem name
++ *
++ * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
++ * follows directly after a '=' character. Every property is terminated by
++ * a '\0' character. The last property is not terminated.
++ *
++ * Example of a message structure:
++ * 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec
++ * 0008 34 00 record is 52 bytes long
++ * 000a 0b 00 text is 11 bytes long
++ * 000c 1f 00 dictionary is 23 bytes long
++ * 000e 03 00 LOG_KERN (facility) LOG_ERR (level)
++ * 0010 69 74 27 73 20 61 20 6c "it's a l"
++ * 69 6e 65 "ine"
++ * 001b 44 45 56 49 43 "DEVIC"
++ * 45 3d 62 38 3a 32 00 44 "E=b8:2\0D"
++ * 52 49 56 45 52 3d 62 75 "RIVER=bu"
++ * 67 "g"
++ * 0032 00 00 00 padding to next message header
++ *
++ * The 'struct printk_log' buffer header must never be directly exported to
++ * userspace, it is a kernel-private implementation detail that might
++ * need to be changed in the future, when the requirements change.
++ *
++ * /dev/kmsg exports the structured data in the following line format:
++ * "level,sequnum,timestamp;<message text>\n"
++ *
++ * The optional key/value pairs are attached as continuation lines starting
++ * with a space character and terminated by a newline. All possible
++ * non-prinatable characters are escaped in the "\xff" notation.
++ *
++ * Users of the export format should ignore possible additional values
++ * separated by ',', and find the message after the ';' character.
++ */
++
++enum log_flags {
++ LOG_NOCONS = 1, /* already flushed, do not print to console */
++ LOG_NEWLINE = 2, /* text ended with a newline */
++ LOG_PREFIX = 4, /* text started with a prefix */
++ LOG_CONT = 8, /* text is a fragment of a continuation line */
++};
++
++struct printk_log {
++ u64 ts_nsec; /* timestamp in nanoseconds */
++ u16 len; /* length of entire record */
++ u16 text_len; /* length of text buffer */
++ u16 dict_len; /* length of dictionary buffer */
++ u8 facility; /* syslog facility */
++ u8 flags:5; /* internal record flags */
++ u8 level:3; /* syslog level */
++};
++
++/*
++ * The logbuf_lock protects kmsg buffer, indices, counters. It is also
++ * used in interesting ways to provide interlocking in console_unlock();
++ */
++static DEFINE_RAW_SPINLOCK(logbuf_lock);
++
++#ifdef CONFIG_PRINTK
++DECLARE_WAIT_QUEUE_HEAD(log_wait);
++/* the next printk record to read by syslog(READ) or /proc/kmsg */
++static u64 syslog_seq;
++static u32 syslog_idx;
++static enum log_flags syslog_prev;
++static size_t syslog_partial;
++
++/* index and sequence number of the first record stored in the buffer */
++static u64 log_first_seq;
++static u32 log_first_idx;
++
++/* index and sequence number of the next record to store in the buffer */
++static u64 log_next_seq;
++static u32 log_next_idx;
++
++/* the next printk record to write to the console */
++static u64 console_seq;
++static u32 console_idx;
++static enum log_flags console_prev;
++
++/* the next printk record to read after the last 'clear' command */
++static u64 clear_seq;
++static u32 clear_idx;
++
++#define PREFIX_MAX 32
++#define LOG_LINE_MAX 1024 - PREFIX_MAX
++
++/* record buffer */
++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
++#define LOG_ALIGN 4
++#else
++#define LOG_ALIGN __alignof__(struct printk_log)
++#endif
++#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
++static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
++static char *log_buf = __log_buf;
++static u32 log_buf_len = __LOG_BUF_LEN;
++
++/* cpu currently holding logbuf_lock */
++static volatile unsigned int logbuf_cpu = UINT_MAX;
++
++/* human readable text of the record */
++static char *log_text(const struct printk_log *msg)
++{
++ return (char *)msg + sizeof(struct printk_log);
++}
++
++/* optional key/value pair dictionary attached to the record */
++static char *log_dict(const struct printk_log *msg)
++{
++ return (char *)msg + sizeof(struct printk_log) + msg->text_len;
++}
++
++/* get record by index; idx must point to valid msg */
++static struct printk_log *log_from_idx(u32 idx)
++{
++ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
++
++ /*
++ * A length == 0 record is the end of buffer marker. Wrap around and
++ * read the message at the start of the buffer.
++ */
++ if (!msg->len)
++ return (struct printk_log *)log_buf;
++ return msg;
++}
++
++/* get next record; idx must point to valid msg */
++static u32 log_next(u32 idx)
++{
++ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
++
++ /* length == 0 indicates the end of the buffer; wrap */
++ /*
++ * A length == 0 record is the end of buffer marker. Wrap around and
++ * read the message at the start of the buffer as *this* one, and
++ * return the one after that.
++ */
++ if (!msg->len) {
++ msg = (struct printk_log *)log_buf;
++ return msg->len;
++ }
++ return idx + msg->len;
++}
++
++/* insert record into the buffer, discard old ones, update heads */
++static void log_store(int facility, int level,
++ enum log_flags flags, u64 ts_nsec,
++ const char *dict, u16 dict_len,
++ const char *text, u16 text_len)
++{
++ struct printk_log *msg;
++ u32 size, pad_len;
++
++ /* number of '\0' padding bytes to next message */
++ size = sizeof(struct printk_log) + text_len + dict_len;
++ pad_len = (-size) & (LOG_ALIGN - 1);
++ size += pad_len;
++
++ while (log_first_seq < log_next_seq) {
++ u32 free;
++
++ if (log_next_idx > log_first_idx)
++ free = max(log_buf_len - log_next_idx, log_first_idx);
++ else
++ free = log_first_idx - log_next_idx;
++
++ if (free > size + sizeof(struct printk_log))
++ break;
++
++ /* drop old messages until we have enough contiuous space */
++ log_first_idx = log_next(log_first_idx);
++ log_first_seq++;
++ }
++
++ if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) {
++ /*
++ * This message + an additional empty header does not fit
++ * at the end of the buffer. Add an empty header with len == 0
++ * to signify a wrap around.
++ */
++ memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
++ log_next_idx = 0;
++ }
++
++ /* fill message */
++ msg = (struct printk_log *)(log_buf + log_next_idx);
++ memcpy(log_text(msg), text, text_len);
++ msg->text_len = text_len;
++ memcpy(log_dict(msg), dict, dict_len);
++ msg->dict_len = dict_len;
++ msg->facility = facility;
++ msg->level = level & 7;
++ msg->flags = flags & 0x1f;
++ if (ts_nsec > 0)
++ msg->ts_nsec = ts_nsec;
++ else
++ msg->ts_nsec = local_clock();
++ memset(log_dict(msg) + dict_len, 0, pad_len);
++ msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len;
++
++ /* insert message */
++ log_next_idx += msg->len;
++ log_next_seq++;
++}
++
++#ifdef CONFIG_SECURITY_DMESG_RESTRICT
++int dmesg_restrict __read_only = 1;
++#else
++int dmesg_restrict __read_only;
++#endif
++
++static int syslog_action_restricted(int type)
++{
++ if (dmesg_restrict)
++ return 1;
++ /*
++ * Unless restricted, we allow "read all" and "get buffer size"
++ * for everybody.
++ */
++ return type != SYSLOG_ACTION_READ_ALL &&
++ type != SYSLOG_ACTION_SIZE_BUFFER;
++}
++
++static int check_syslog_permissions(int type, bool from_file)
++{
++ /*
++ * If this is from /proc/kmsg and we've already opened it, then we've
++ * already done the capabilities checks at open time.
++ */
++ if (from_file && type != SYSLOG_ACTION_OPEN)
++ return 0;
++
++#ifdef CONFIG_GRKERNSEC_DMESG
++ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
++ return -EPERM;
++#endif
++
++ if (syslog_action_restricted(type)) {
++ if (capable(CAP_SYSLOG))
++ return 0;
++ /*
++ * For historical reasons, accept CAP_SYS_ADMIN too, with
++ * a warning.
++ */
++ if (capable(CAP_SYS_ADMIN)) {
++ pr_warn_once("%s (%d): Attempt to access syslog with "
++ "CAP_SYS_ADMIN but no CAP_SYSLOG "
++ "(deprecated).\n",
++ current->comm, task_pid_nr(current));
++ return 0;
++ }
++ return -EPERM;
++ }
++ return security_syslog(type);
++}
++
++
++/* /dev/kmsg - userspace message inject/listen interface */
++struct devkmsg_user {
++ u64 seq;
++ u32 idx;
++ enum log_flags prev;
++ struct mutex lock;
++ char buf[8192];
++};
++
++static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
++ unsigned long count, loff_t pos)
++{
++ char *buf, *line;
++ int i;
++ int level = default_message_loglevel;
++ int facility = 1; /* LOG_USER */
++ size_t len = iov_length(iv, count);
++ ssize_t ret = len;
++
++ if (len > LOG_LINE_MAX)
++ return -EINVAL;
++ buf = kmalloc(len+1, GFP_KERNEL);
++ if (buf == NULL)
++ return -ENOMEM;
++
++ line = buf;
++ for (i = 0; i < count; i++) {
++ if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len)) {
++ ret = -EFAULT;
++ goto out;
++ }
++ line += iv[i].iov_len;
++ }
++
++ /*
++ * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
++ * the decimal value represents 32bit, the lower 3 bit are the log
++ * level, the rest are the log facility.
++ *
++ * If no prefix or no userspace facility is specified, we
++ * enforce LOG_USER, to be able to reliably distinguish
++ * kernel-generated messages from userspace-injected ones.
++ */
++ line = buf;
++ if (line[0] == '<') {
++ char *endp = NULL;
++
++ i = simple_strtoul(line+1, &endp, 10);
++ if (endp && endp[0] == '>') {
++ level = i & 7;
++ if (i >> 3)
++ facility = i >> 3;
++ endp++;
++ len -= endp - line;
++ line = endp;
++ }
++ }
++ line[len] = '\0';
++
++ printk_emit(facility, level, NULL, 0, "%s", line);
++out:
++ kfree(buf);
++ return ret;
++}
++
++static ssize_t devkmsg_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct devkmsg_user *user = file->private_data;
++ struct printk_log *msg;
++ u64 ts_usec;
++ size_t i;
++ char cont = '-';
++ size_t len;
++ ssize_t ret;
++
++ if (!user)
++ return -EBADF;
++
++ ret = mutex_lock_interruptible(&user->lock);
++ if (ret)
++ return ret;
++ raw_spin_lock_irq(&logbuf_lock);
++ while (user->seq == log_next_seq) {
++ if (file->f_flags & O_NONBLOCK) {
++ ret = -EAGAIN;
++ raw_spin_unlock_irq(&logbuf_lock);
++ goto out;
++ }
++
++ raw_spin_unlock_irq(&logbuf_lock);
++ ret = wait_event_interruptible(log_wait,
++ user->seq != log_next_seq);
++ if (ret)
++ goto out;
++ raw_spin_lock_irq(&logbuf_lock);
++ }
++
++ if (user->seq < log_first_seq) {
++ /* our last seen message is gone, return error and reset */
++ user->idx = log_first_idx;
++ user->seq = log_first_seq;
++ ret = -EPIPE;
++ raw_spin_unlock_irq(&logbuf_lock);
++ goto out;
++ }
++
++ msg = log_from_idx(user->idx);
++ ts_usec = msg->ts_nsec;
++ do_div(ts_usec, 1000);
++
++ /*
++ * If we couldn't merge continuation line fragments during the print,
++ * export the stored flags to allow an optional external merge of the
++ * records. Merging the records isn't always necessarily correct, like
++ * when we hit a race during printing. In most cases though, it produces
++ * better readable output. 'c' in the record flags mark the first
++ * fragment of a line, '+' the following.
++ */
++ if (msg->flags & LOG_CONT && !(user->prev & LOG_CONT))
++ cont = 'c';
++ else if ((msg->flags & LOG_CONT) ||
++ ((user->prev & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
++ cont = '+';
++
++ len = sprintf(user->buf, "%u,%llu,%llu,%c;",
++ (msg->facility << 3) | msg->level,
++ user->seq, ts_usec, cont);
++ user->prev = msg->flags;
++
++ /* escape non-printable characters */
++ for (i = 0; i < msg->text_len; i++) {
++ unsigned char c = log_text(msg)[i];
++
++ if (c < ' ' || c >= 127 || c == '\\')
++ len += sprintf(user->buf + len, "\\x%02x", c);
++ else
++ user->buf[len++] = c;
++ }
++ user->buf[len++] = '\n';
++
++ if (msg->dict_len) {
++ bool line = true;
++
++ for (i = 0; i < msg->dict_len; i++) {
++ unsigned char c = log_dict(msg)[i];
++
++ if (line) {
++ user->buf[len++] = ' ';
++ line = false;
++ }
++
++ if (c == '\0') {
++ user->buf[len++] = '\n';
++ line = true;
++ continue;
++ }
++
++ if (c < ' ' || c >= 127 || c == '\\') {
++ len += sprintf(user->buf + len, "\\x%02x", c);
++ continue;
++ }
++
++ user->buf[len++] = c;
++ }
++ user->buf[len++] = '\n';
++ }
++
++ user->idx = log_next(user->idx);
++ user->seq++;
++ raw_spin_unlock_irq(&logbuf_lock);
++
++ if (len > count) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (copy_to_user(buf, user->buf, len)) {
++ ret = -EFAULT;
++ goto out;
++ }
++ ret = len;
++out:
++ mutex_unlock(&user->lock);
++ return ret;
++}
++
++static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
++{
++ struct devkmsg_user *user = file->private_data;
++ loff_t ret = 0;
++
++ if (!user)
++ return -EBADF;
++ if (offset)
++ return -ESPIPE;
++
++ raw_spin_lock_irq(&logbuf_lock);
++ switch (whence) {
++ case SEEK_SET:
++ /* the first record */
++ user->idx = log_first_idx;
++ user->seq = log_first_seq;
++ break;
++ case SEEK_DATA:
++ /*
++ * The first record after the last SYSLOG_ACTION_CLEAR,
++ * like issued by 'dmesg -c'. Reading /dev/kmsg itself
++ * changes no global state, and does not clear anything.
++ */
++ user->idx = clear_idx;
++ user->seq = clear_seq;
++ break;
++ case SEEK_END:
++ /* after the last record */
++ user->idx = log_next_idx;
++ user->seq = log_next_seq;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ raw_spin_unlock_irq(&logbuf_lock);
++ return ret;
++}
++
++static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
++{
++ struct devkmsg_user *user = file->private_data;
++ int ret = 0;
++
++ if (!user)
++ return POLLERR|POLLNVAL;
++
++ poll_wait(file, &log_wait, wait);
++
++ raw_spin_lock_irq(&logbuf_lock);
++ if (user->seq < log_next_seq) {
++ /* return error when data has vanished underneath us */
++ if (user->seq < log_first_seq)
++ ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
++ else
++ ret = POLLIN|POLLRDNORM;
++ }
++ raw_spin_unlock_irq(&logbuf_lock);
++
++ return ret;
++}
++
++static int devkmsg_open(struct inode *inode, struct file *file)
++{
++ struct devkmsg_user *user;
++ int err;
++
++ /* write-only does not need any file context */
++ if ((file->f_flags & O_ACCMODE) == O_WRONLY)
++ return 0;
++
++ err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
++ SYSLOG_FROM_READER);
++ if (err)
++ return err;
++
++ user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
++ if (!user)
++ return -ENOMEM;
++
++ mutex_init(&user->lock);
++
++ raw_spin_lock_irq(&logbuf_lock);
++ user->idx = log_first_idx;
++ user->seq = log_first_seq;
++ raw_spin_unlock_irq(&logbuf_lock);
++
++ file->private_data = user;
++ return 0;
++}
++
++static int devkmsg_release(struct inode *inode, struct file *file)
++{
++ struct devkmsg_user *user = file->private_data;
++
++ if (!user)
++ return 0;
++
++ mutex_destroy(&user->lock);
++ kfree(user);
++ return 0;
++}
++
++const struct file_operations kmsg_fops = {
++ .open = devkmsg_open,
++ .read = devkmsg_read,
++ .aio_write = devkmsg_writev,
++ .llseek = devkmsg_llseek,
++ .poll = devkmsg_poll,
++ .release = devkmsg_release,
++};
++
++#ifdef CONFIG_KEXEC
++/*
++ * This appends the listed symbols to /proc/vmcore
++ *
++ * /proc/vmcore is used by various utilities, like crash and makedumpfile to
++ * obtain access to symbols that are otherwise very difficult to locate. These
++ * symbols are specifically used so that utilities can access and extract the
++ * dmesg log from a vmcore file after a crash.
++ */
++void log_buf_kexec_setup(void)
++{
++ VMCOREINFO_SYMBOL(log_buf);
++ VMCOREINFO_SYMBOL(log_buf_len);
++ VMCOREINFO_SYMBOL(log_first_idx);
++ VMCOREINFO_SYMBOL(log_next_idx);
++ /*
++ * Export struct printk_log size and field offsets. User space tools can
++ * parse it and detect any changes to structure down the line.
++ */
++ VMCOREINFO_STRUCT_SIZE(printk_log);
++ VMCOREINFO_OFFSET(printk_log, ts_nsec);
++ VMCOREINFO_OFFSET(printk_log, len);
++ VMCOREINFO_OFFSET(printk_log, text_len);
++ VMCOREINFO_OFFSET(printk_log, dict_len);
++}
++#endif
++
++/* requested log_buf_len from kernel cmdline */
++static unsigned long __initdata new_log_buf_len;
++
++/* save requested log_buf_len since it's too early to process it */
++static int __init log_buf_len_setup(char *str)
++{
++ unsigned size = memparse(str, &str);
++
++ if (size)
++ size = roundup_pow_of_two(size);
++ if (size > log_buf_len)
++ new_log_buf_len = size;
++
++ return 0;
++}
++early_param("log_buf_len", log_buf_len_setup);
++
++void __init setup_log_buf(int early)
++{
++ unsigned long flags;
++ char *new_log_buf;
++ int free;
++
++ if (!new_log_buf_len)
++ return;
++
++ if (early) {
++ new_log_buf =
++ memblock_virt_alloc(new_log_buf_len, PAGE_SIZE);
++ } else {
++ new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len, 0);
++ }
++
++ if (unlikely(!new_log_buf)) {
++ pr_err("log_buf_len: %ld bytes not available\n",
++ new_log_buf_len);
++ return;
++ }
++
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++ log_buf_len = new_log_buf_len;
++ log_buf = new_log_buf;
++ new_log_buf_len = 0;
++ free = __LOG_BUF_LEN - log_next_idx;
++ memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++
++ pr_info("log_buf_len: %d\n", log_buf_len);
++ pr_info("early log buf free: %d(%d%%)\n",
++ free, (free * 100) / __LOG_BUF_LEN);
++}
++
++static bool __read_mostly ignore_loglevel;
++
++static int __init ignore_loglevel_setup(char *str)
++{
++ ignore_loglevel = 1;
++ pr_info("debug: ignoring loglevel setting.\n");
++
++ return 0;
++}
++
++early_param("ignore_loglevel", ignore_loglevel_setup);
++module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
++ "print all kernel messages to the console.");
++
++#ifdef CONFIG_BOOT_PRINTK_DELAY
++
++static int boot_delay; /* msecs delay after each printk during bootup */
++static unsigned long long loops_per_msec; /* based on boot_delay */
++
++static int __init boot_delay_setup(char *str)
++{
++ unsigned long lpj;
++
++ lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
++ loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
++
++ get_option(&str, &boot_delay);
++ if (boot_delay > 10 * 1000)
++ boot_delay = 0;
++
++ pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
++ "HZ: %d, loops_per_msec: %llu\n",
++ boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
++ return 0;
++}
++early_param("boot_delay", boot_delay_setup);
++
++static void boot_delay_msec(int level)
++{
++ unsigned long long k;
++ unsigned long timeout;
++
++ if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
++ || (level >= console_loglevel && !ignore_loglevel)) {
++ return;
++ }
++
++ k = (unsigned long long)loops_per_msec * boot_delay;
++
++ timeout = jiffies + msecs_to_jiffies(boot_delay);
++ while (k) {
++ k--;
++ cpu_relax();
++ /*
++ * use (volatile) jiffies to prevent
++ * compiler reduction; loop termination via jiffies
++ * is secondary and may or may not happen.
++ */
++ if (time_after(jiffies, timeout))
++ break;
++ touch_nmi_watchdog();
++ }
++}
++#else
++static inline void boot_delay_msec(int level)
++{
++}
++#endif
++
++#if defined(CONFIG_PRINTK_TIME)
++static bool printk_time = 1;
++#else
++static bool printk_time;
++#endif
++module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
++
++static size_t print_time(u64 ts, char *buf)
++{
++ unsigned long rem_nsec;
++
++ if (!printk_time)
++ return 0;
++
++ rem_nsec = do_div(ts, 1000000000);
++
++ if (!buf)
++ return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
++
++ return sprintf(buf, "[%5lu.%06lu] ",
++ (unsigned long)ts, rem_nsec / 1000);
++}
++
++static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
++{
++ size_t len = 0;
++ unsigned int prefix = (msg->facility << 3) | msg->level;
++
++ if (syslog) {
++ if (buf) {
++ len += sprintf(buf, "<%u>", prefix);
++ } else {
++ len += 3;
++ if (prefix > 999)
++ len += 3;
++ else if (prefix > 99)
++ len += 2;
++ else if (prefix > 9)
++ len++;
++ }
++ }
++
++ len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
++ return len;
++}
++
++static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
++ bool syslog, char *buf, size_t size)
++{
++ const char *text = log_text(msg);
++ size_t text_size = msg->text_len;
++ bool prefix = true;
++ bool newline = true;
++ size_t len = 0;
++
++ if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
++ prefix = false;
++
++ if (msg->flags & LOG_CONT) {
++ if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
++ prefix = false;
++
++ if (!(msg->flags & LOG_NEWLINE))
++ newline = false;
++ }
++
++ do {
++ const char *next = memchr(text, '\n', text_size);
++ size_t text_len;
++
++ if (next) {
++ text_len = next - text;
++ next++;
++ text_size -= next - text;
++ } else {
++ text_len = text_size;
++ }
++
++ if (buf) {
++ if (print_prefix(msg, syslog, NULL) +
++ text_len + 1 >= size - len)
++ break;
++
++ if (prefix)
++ len += print_prefix(msg, syslog, buf + len);
++ memcpy(buf + len, text, text_len);
++ len += text_len;
++ if (next || newline)
++ buf[len++] = '\n';
++ } else {
++ /* SYSLOG_ACTION_* buffer size only calculation */
++ if (prefix)
++ len += print_prefix(msg, syslog, NULL);
++ len += text_len;
++ if (next || newline)
++ len++;
++ }
++
++ prefix = true;
++ text = next;
++ } while (text);
++
++ return len;
++}
++
++static int syslog_print(char __user *buf, int size)
++{
++ char *text;
++ struct printk_log *msg;
++ int len = 0;
++
++ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
++ if (!text)
++ return -ENOMEM;
++
++ while (size > 0) {
++ size_t n;
++ size_t skip;
++
++ raw_spin_lock_irq(&logbuf_lock);
++ if (syslog_seq < log_first_seq) {
++ /* messages are gone, move to first one */
++ syslog_seq = log_first_seq;
++ syslog_idx = log_first_idx;
++ syslog_prev = 0;
++ syslog_partial = 0;
++ }
++ if (syslog_seq == log_next_seq) {
++ raw_spin_unlock_irq(&logbuf_lock);
++ break;
++ }
++
++ skip = syslog_partial;
++ msg = log_from_idx(syslog_idx);
++ n = msg_print_text(msg, syslog_prev, true, text,
++ LOG_LINE_MAX + PREFIX_MAX);
++ if (n - syslog_partial <= size) {
++ /* message fits into buffer, move forward */
++ syslog_idx = log_next(syslog_idx);
++ syslog_seq++;
++ syslog_prev = msg->flags;
++ n -= syslog_partial;
++ syslog_partial = 0;
++ } else if (!len){
++ /* partial read(), remember position */
++ n = size;
++ syslog_partial += n;
++ } else
++ n = 0;
++ raw_spin_unlock_irq(&logbuf_lock);
++
++ if (!n)
++ break;
++
++ if (copy_to_user(buf, text + skip, n)) {
++ if (!len)
++ len = -EFAULT;
++ break;
++ }
++
++ len += n;
++ size -= n;
++ buf += n;
++ }
++
++ kfree(text);
++ return len;
++}
++
++static int syslog_print_all(char __user *buf, int size, bool clear)
++{
++ char *text;
++ int len = 0;
++
++ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
++ if (!text)
++ return -ENOMEM;
++
++ raw_spin_lock_irq(&logbuf_lock);
++ if (buf) {
++ u64 next_seq;
++ u64 seq;
++ u32 idx;
++ enum log_flags prev;
++
++ if (clear_seq < log_first_seq) {
++ /* messages are gone, move to first available one */
++ clear_seq = log_first_seq;
++ clear_idx = log_first_idx;
++ }
++
++ /*
++ * Find first record that fits, including all following records,
++ * into the user-provided buffer for this dump.
++ */
++ seq = clear_seq;
++ idx = clear_idx;
++ prev = 0;
++ while (seq < log_next_seq) {
++ struct printk_log *msg = log_from_idx(idx);
++
++ len += msg_print_text(msg, prev, true, NULL, 0);
++ prev = msg->flags;
++ idx = log_next(idx);
++ seq++;
++ }
++
++ /* move first record forward until length fits into the buffer */
++ seq = clear_seq;
++ idx = clear_idx;
++ prev = 0;
++ while (len > size && seq < log_next_seq) {
++ struct printk_log *msg = log_from_idx(idx);
++
++ len -= msg_print_text(msg, prev, true, NULL, 0);
++ prev = msg->flags;
++ idx = log_next(idx);
++ seq++;
++ }
++
++ /* last message fitting into this dump */
++ next_seq = log_next_seq;
++
++ len = 0;
++ while (len >= 0 && seq < next_seq) {
++ struct printk_log *msg = log_from_idx(idx);
++ int textlen;
++
++ textlen = msg_print_text(msg, prev, true, text,
++ LOG_LINE_MAX + PREFIX_MAX);
++ if (textlen < 0) {
++ len = textlen;
++ break;
++ }
++ idx = log_next(idx);
++ seq++;
++ prev = msg->flags;
++
++ raw_spin_unlock_irq(&logbuf_lock);
++ if (copy_to_user(buf + len, text, textlen))
++ len = -EFAULT;
++ else
++ len += textlen;
++ raw_spin_lock_irq(&logbuf_lock);
++
++ if (seq < log_first_seq) {
++ /* messages are gone, move to next one */
++ seq = log_first_seq;
++ idx = log_first_idx;
++ prev = 0;
++ }
++ }
++ }
++
++ if (clear) {
++ clear_seq = log_next_seq;
++ clear_idx = log_next_idx;
++ }
++ raw_spin_unlock_irq(&logbuf_lock);
++
++ kfree(text);
++ return len;
++}
++
++int do_syslog(int type, char __user *buf, int len, bool from_file)
++{
++ bool clear = false;
++ static int saved_console_loglevel = -1;
++ int error;
++
++ error = check_syslog_permissions(type, from_file);
++ if (error)
++ goto out;
++
++ error = security_syslog(type);
++ if (error)
++ return error;
++
++ switch (type) {
++ case SYSLOG_ACTION_CLOSE: /* Close log */
++ break;
++ case SYSLOG_ACTION_OPEN: /* Open log */
++ break;
++ case SYSLOG_ACTION_READ: /* Read from log */
++ error = -EINVAL;
++ if (!buf || len < 0)
++ goto out;
++ error = 0;
++ if (!len)
++ goto out;
++ if (!access_ok(VERIFY_WRITE, buf, len)) {
++ error = -EFAULT;
++ goto out;
++ }
++ error = wait_event_interruptible(log_wait,
++ syslog_seq != log_next_seq);
++ if (error)
++ goto out;
++ error = syslog_print(buf, len);
++ break;
++ /* Read/clear last kernel messages */
++ case SYSLOG_ACTION_READ_CLEAR:
++ clear = true;
++ /* FALL THRU */
++ /* Read last kernel messages */
++ case SYSLOG_ACTION_READ_ALL:
++ error = -EINVAL;
++ if (!buf || len < 0)
++ goto out;
++ error = 0;
++ if (!len)
++ goto out;
++ if (!access_ok(VERIFY_WRITE, buf, len)) {
++ error = -EFAULT;
++ goto out;
++ }
++ error = syslog_print_all(buf, len, clear);
++ break;
++ /* Clear ring buffer */
++ case SYSLOG_ACTION_CLEAR:
++ syslog_print_all(NULL, 0, true);
++ break;
++ /* Disable logging to console */
++ case SYSLOG_ACTION_CONSOLE_OFF:
++ if (saved_console_loglevel == -1)
++ saved_console_loglevel = console_loglevel;
++ console_loglevel = minimum_console_loglevel;
++ break;
++ /* Enable logging to console */
++ case SYSLOG_ACTION_CONSOLE_ON:
++ if (saved_console_loglevel != -1) {
++ console_loglevel = saved_console_loglevel;
++ saved_console_loglevel = -1;
++ }
++ break;
++ /* Set level of messages printed to console */
++ case SYSLOG_ACTION_CONSOLE_LEVEL:
++ error = -EINVAL;
++ if (len < 1 || len > 8)
++ goto out;
++ if (len < minimum_console_loglevel)
++ len = minimum_console_loglevel;
++ console_loglevel = len;
++ /* Implicitly re-enable logging to console */
++ saved_console_loglevel = -1;
++ error = 0;
++ break;
++ /* Number of chars in the log buffer */
++ case SYSLOG_ACTION_SIZE_UNREAD:
++ raw_spin_lock_irq(&logbuf_lock);
++ if (syslog_seq < log_first_seq) {
++ /* messages are gone, move to first one */
++ syslog_seq = log_first_seq;
++ syslog_idx = log_first_idx;
++ syslog_prev = 0;
++ syslog_partial = 0;
++ }
++ if (from_file) {
++ /*
++ * Short-cut for poll(/proc/kmsg) which simply checks
++ * for pending data, not the size; return the count of
++ * records, not the length.
++ */
++ error = log_next_idx - syslog_idx;
++ } else {
++ u64 seq = syslog_seq;
++ u32 idx = syslog_idx;
++ enum log_flags prev = syslog_prev;
++
++ error = 0;
++ while (seq < log_next_seq) {
++ struct printk_log *msg = log_from_idx(idx);
++
++ error += msg_print_text(msg, prev, true, NULL, 0);
++ idx = log_next(idx);
++ seq++;
++ prev = msg->flags;
++ }
++ error -= syslog_partial;
++ }
++ raw_spin_unlock_irq(&logbuf_lock);
++ break;
++ /* Size of the log buffer */
++ case SYSLOG_ACTION_SIZE_BUFFER:
++ error = log_buf_len;
++ break;
++ default:
++ error = -EINVAL;
++ break;
++ }
++out:
++ return error;
++}
++
++SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
++{
++ return do_syslog(type, buf, len, SYSLOG_FROM_READER);
++}
++
++/*
++ * Call the console drivers, asking them to write out
++ * log_buf[start] to log_buf[end - 1].
++ * The console_lock must be held.
++ */
++static void call_console_drivers(int level, const char *text, size_t len)
++{
++ struct console *con;
++
++ trace_console(text, len);
++
++ if (level >= console_loglevel && !ignore_loglevel)
++ return;
++ if (!console_drivers)
++ return;
++
++ for_each_console(con) {
++ if (exclusive_console && con != exclusive_console)
++ continue;
++ if (!(con->flags & CON_ENABLED))
++ continue;
++ if (!con->write)
++ continue;
++ if (!cpu_online(smp_processor_id()) &&
++ !(con->flags & CON_ANYTIME))
++ continue;
++ con->write(con, text, len);
++ }
++}
++
++/*
++ * Zap console related locks when oopsing. Only zap at most once
++ * every 10 seconds, to leave time for slow consoles to print a
++ * full oops.
++ */
++static void zap_locks(void)
++{
++ static unsigned long oops_timestamp;
++
++ if (time_after_eq(jiffies, oops_timestamp) &&
++ !time_after(jiffies, oops_timestamp + 30 * HZ))
++ return;
++
++ oops_timestamp = jiffies;
++
++ debug_locks_off();
++ /* If a crash is occurring, make sure we can't deadlock */
++ raw_spin_lock_init(&logbuf_lock);
++ /* And make sure that we print immediately */
++ sema_init(&console_sem, 1);
++}
++
++/* Check if we have any console registered that can be called early in boot. */
++static int have_callable_console(void)
++{
++ struct console *con;
++
++ for_each_console(con)
++ if (con->flags & CON_ANYTIME)
++ return 1;
++
++ return 0;
++}
++
++/*
++ * Can we actually use the console at this time on this cpu?
++ *
++ * Console drivers may assume that per-cpu resources have
++ * been allocated. So unless they're explicitly marked as
++ * being able to cope (CON_ANYTIME) don't call them until
++ * this CPU is officially up.
++ */
++static inline int can_use_console(unsigned int cpu)
++{
++ return cpu_online(cpu) || have_callable_console();
++}
++
++/*
++ * Try to get console ownership to actually show the kernel
++ * messages from a 'printk'. Return true (and with the
++ * console_lock held, and 'console_locked' set) if it
++ * is successful, false otherwise.
++ *
++ * This gets called with the 'logbuf_lock' spinlock held and
++ * interrupts disabled. It should return with 'logbuf_lock'
++ * released but interrupts still disabled.
++ */
++static int console_trylock_for_printk(unsigned int cpu)
++ __releases(&logbuf_lock)
++{
++ int retval = 0, wake = 0;
++
++ if (console_trylock()) {
++ retval = 1;
++
++ /*
++ * If we can't use the console, we need to release
++ * the console semaphore by hand to avoid flushing
++ * the buffer. We need to hold the console semaphore
++ * in order to do this test safely.
++ */
++ if (!can_use_console(cpu)) {
++ console_locked = 0;
++ wake = 1;
++ retval = 0;
++ }
++ }
++ logbuf_cpu = UINT_MAX;
++ raw_spin_unlock(&logbuf_lock);
++ if (wake)
++ up(&console_sem);
++ return retval;
++}
++
++int printk_delay_msec __read_mostly;
++
++static inline void printk_delay(void)
++{
++ if (unlikely(printk_delay_msec)) {
++ int m = printk_delay_msec;
++
++ while (m--) {
++ mdelay(1);
++ touch_nmi_watchdog();
++ }
++ }
++}
++
++/*
++ * Continuation lines are buffered, and not committed to the record buffer
++ * until the line is complete, or a race forces it. The line fragments
++ * though, are printed immediately to the consoles to ensure everything has
++ * reached the console in case of a kernel crash.
++ */
++static struct cont {
++ char buf[LOG_LINE_MAX];
++ size_t len; /* length == 0 means unused buffer */
++ size_t cons; /* bytes written to console */
++ struct task_struct *owner; /* task of first print */
++ u64 ts_nsec; /* time of first print */
++ u8 level; /* log level of first message */
++ u8 facility; /* log facility of first message */
++ enum log_flags flags; /* prefix, newline flags */
++ bool flushed:1; /* buffer sealed and committed */
++} cont;
++
++static void cont_flush(enum log_flags flags)
++{
++ if (cont.flushed)
++ return;
++ if (cont.len == 0)
++ return;
++
++ if (cont.cons) {
++ /*
++ * If a fragment of this line was directly flushed to the
++ * console; wait for the console to pick up the rest of the
++ * line. LOG_NOCONS suppresses a duplicated output.
++ */
++ log_store(cont.facility, cont.level, flags | LOG_NOCONS,
++ cont.ts_nsec, NULL, 0, cont.buf, cont.len);
++ cont.flags = flags;
++ cont.flushed = true;
++ } else {
++ /*
++ * If no fragment of this line ever reached the console,
++ * just submit it to the store and free the buffer.
++ */
++ log_store(cont.facility, cont.level, flags, 0,
++ NULL, 0, cont.buf, cont.len);
++ cont.len = 0;
++ }
++}
++
++static bool cont_add(int facility, int level, const char *text, size_t len)
++{
++ if (cont.len && cont.flushed)
++ return false;
++
++ if (cont.len + len > sizeof(cont.buf)) {
++ /* the line gets too long, split it up in separate records */
++ cont_flush(LOG_CONT);
++ return false;
++ }
++
++ if (!cont.len) {
++ cont.facility = facility;
++ cont.level = level;
++ cont.owner = current;
++ cont.ts_nsec = local_clock();
++ cont.flags = 0;
++ cont.cons = 0;
++ cont.flushed = false;
++ }
++
++ memcpy(cont.buf + cont.len, text, len);
++ cont.len += len;
++
++ if (cont.len > (sizeof(cont.buf) * 80) / 100)
++ cont_flush(LOG_CONT);
++
++ return true;
++}
++
++static size_t cont_print_text(char *text, size_t size)
++{
++ size_t textlen = 0;
++ size_t len;
++
++ if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
++ textlen += print_time(cont.ts_nsec, text);
++ size -= textlen;
++ }
++
++ len = cont.len - cont.cons;
++ if (len > 0) {
++ if (len+1 > size)
++ len = size-1;
++ memcpy(text + textlen, cont.buf + cont.cons, len);
++ textlen += len;
++ cont.cons = cont.len;
++ }
++
++ if (cont.flushed) {
++ if (cont.flags & LOG_NEWLINE)
++ text[textlen++] = '\n';
++ /* got everything, release buffer */
++ cont.len = 0;
++ }
++ return textlen;
++}
++
++asmlinkage int vprintk_emit(int facility, int level,
++ const char *dict, size_t dictlen,
++ const char *fmt, va_list args)
++{
++ static int recursion_bug;
++ static char textbuf[LOG_LINE_MAX];
++ char *text = textbuf;
++ size_t text_len;
++ enum log_flags lflags = 0;
++ unsigned long flags;
++ int this_cpu;
++ int printed_len = 0;
++
++ boot_delay_msec(level);
++ printk_delay();
++
++ /* This stops the holder of console_sem just where we want him */
++ local_irq_save(flags);
++ this_cpu = smp_processor_id();
++
++ /*
++ * Ouch, printk recursed into itself!
++ */
++ if (unlikely(logbuf_cpu == this_cpu)) {
++ /*
++ * If a crash is occurring during printk() on this CPU,
++ * then try to get the crash message out but make sure
++ * we can't deadlock. Otherwise just return to avoid the
++ * recursion and return - but flag the recursion so that
++ * it can be printed at the next appropriate moment:
++ */
++ if (!oops_in_progress && !lockdep_recursing(current)) {
++ recursion_bug = 1;
++ goto out_restore_irqs;
++ }
++ zap_locks();
++ }
++
++ lockdep_off();
++ raw_spin_lock(&logbuf_lock);
++ logbuf_cpu = this_cpu;
++
++ if (recursion_bug) {
++ static const char recursion_msg[] =
++ "BUG: recent printk recursion!";
++
++ recursion_bug = 0;
++ printed_len += strlen(recursion_msg);
++ /* emit KERN_CRIT message */
++ log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
++ NULL, 0, recursion_msg, printed_len);
++ }
++
++ /*
++ * The printf needs to come first; we need the syslog
++ * prefix which might be passed-in as a parameter.
++ */
++ text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
++
++ /* mark and strip a trailing newline */
++ if (text_len && text[text_len-1] == '\n') {
++ text_len--;
++ lflags |= LOG_NEWLINE;
++ }
++
++ /* strip kernel syslog prefix and extract log level or control flags */
++ if (facility == 0) {
++ int kern_level = printk_get_level(text);
++
++ if (kern_level) {
++ const char *end_of_header = printk_skip_level(text);
++ switch (kern_level) {
++ case '0' ... '7':
++ if (level == -1)
++ level = kern_level - '0';
++ case 'd': /* KERN_DEFAULT */
++ lflags |= LOG_PREFIX;
++ case 'c': /* KERN_CONT */
++ break;
++ }
++ text_len -= end_of_header - text;
++ text = (char *)end_of_header;
++ }
++ }
++
++ if (level == -1)
++ level = default_message_loglevel;
++
++ if (dict)
++ lflags |= LOG_PREFIX|LOG_NEWLINE;
++
++ if (!(lflags & LOG_NEWLINE)) {
++ /*
++ * Flush the conflicting buffer. An earlier newline was missing,
++ * or another task also prints continuation lines.
++ */
++ if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
++ cont_flush(LOG_NEWLINE);
++
++ /* buffer line if possible, otherwise store it right away */
++ if (!cont_add(facility, level, text, text_len))
++ log_store(facility, level, lflags | LOG_CONT, 0,
++ dict, dictlen, text, text_len);
++ } else {
++ bool stored = false;
++
++ /*
++ * If an earlier newline was missing and it was the same task,
++ * either merge it with the current buffer and flush, or if
++ * there was a race with interrupts (prefix == true) then just
++ * flush it out and store this line separately.
++ * If the preceding printk was from a different task and missed
++ * a newline, flush and append the newline.
++ */
++ if (cont.len) {
++ if (cont.owner == current && !(lflags & LOG_PREFIX))
++ stored = cont_add(facility, level, text,
++ text_len);
++ cont_flush(LOG_NEWLINE);
++ }
++
++ if (!stored)
++ log_store(facility, level, lflags, 0,
++ dict, dictlen, text, text_len);
++ }
++ printed_len += text_len;
++
++ /*
++ * Try to acquire and then immediately release the console semaphore.
++ * The release will print out buffers and wake up /dev/kmsg and syslog()
++ * users.
++ *
++ * The console_trylock_for_printk() function will release 'logbuf_lock'
++ * regardless of whether it actually gets the console semaphore or not.
++ */
++ if (console_trylock_for_printk(this_cpu))
++ console_unlock();
++
++ lockdep_on();
++out_restore_irqs:
++ local_irq_restore(flags);
++
++ return printed_len;
++}
++EXPORT_SYMBOL(vprintk_emit);
++
++asmlinkage int vprintk(const char *fmt, va_list args)
++{
++ return vprintk_emit(0, -1, NULL, 0, fmt, args);
++}
++EXPORT_SYMBOL(vprintk);
++
++asmlinkage int printk_emit(int facility, int level,
++ const char *dict, size_t dictlen,
++ const char *fmt, ...)
++{
++ va_list args;
++ int r;
++
++ va_start(args, fmt);
++ r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
++ va_end(args);
++
++ return r;
++}
++EXPORT_SYMBOL(printk_emit);
++
++/**
++ * printk - print a kernel message
++ * @fmt: format string
++ *
++ * This is printk(). It can be called from any context. We want it to work.
++ *
++ * We try to grab the console_lock. If we succeed, it's easy - we log the
++ * output and call the console drivers. If we fail to get the semaphore, we
++ * place the output into the log buffer and return. The current holder of
++ * the console_sem will notice the new output in console_unlock(); and will
++ * send it to the consoles before releasing the lock.
++ *
++ * One effect of this deferred printing is that code which calls printk() and
++ * then changes console_loglevel may break. This is because console_loglevel
++ * is inspected when the actual printing occurs.
++ *
++ * See also:
++ * printf(3)
++ *
++ * See the vsnprintf() documentation for format string extensions over C99.
++ */
++asmlinkage int printk(const char *fmt, ...)
++{
++ va_list args;
++ int r;
++
++#ifdef CONFIG_KGDB_KDB
++ if (unlikely(kdb_trap_printk)) {
++ va_start(args, fmt);
++ r = vkdb_printf(fmt, args);
++ va_end(args);
++ return r;
++ }
++#endif
++ va_start(args, fmt);
++ r = vprintk_emit(0, -1, NULL, 0, fmt, args);
++ va_end(args);
++
++ return r;
++}
++EXPORT_SYMBOL(printk);
++
++#else /* CONFIG_PRINTK */
++
++#define LOG_LINE_MAX 0
++#define PREFIX_MAX 0
++#define LOG_LINE_MAX 0
++static u64 syslog_seq;
++static u32 syslog_idx;
++static u64 console_seq;
++static u32 console_idx;
++static enum log_flags syslog_prev;
++static u64 log_first_seq;
++static u32 log_first_idx;
++static u64 log_next_seq;
++static enum log_flags console_prev;
++static struct cont {
++ size_t len;
++ size_t cons;
++ u8 level;
++ bool flushed:1;
++} cont;
++static struct printk_log *log_from_idx(u32 idx) { return NULL; }
++static u32 log_next(u32 idx) { return 0; }
++static void call_console_drivers(int level, const char *text, size_t len) {}
++static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
++ bool syslog, char *buf, size_t size) { return 0; }
++static size_t cont_print_text(char *text, size_t size) { return 0; }
++
++#endif /* CONFIG_PRINTK */
++
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++void early_vprintk(const char *fmt, va_list ap)
++{
++ if (early_console) {
++ char buf[512];
++ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++
++ early_console->write(early_console, buf, n);
++ }
++}
++
++asmlinkage void early_printk(const char *fmt, ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ early_vprintk(fmt, ap);
++ va_end(ap);
++}
++#endif
++
++static int __add_preferred_console(char *name, int idx, char *options,
++ char *brl_options)
++{
++ struct console_cmdline *c;
++ int i;
++
++ /*
++ * See if this tty is not yet registered, and
++ * if we have a slot free.
++ */
++ for (i = 0, c = console_cmdline;
++ i < MAX_CMDLINECONSOLES && c->name[0];
++ i++, c++) {
++ if (strcmp(c->name, name) == 0 && c->index == idx) {
++ if (!brl_options)
++ selected_console = i;
++ return 0;
++ }
++ }
++ if (i == MAX_CMDLINECONSOLES)
++ return -E2BIG;
++ if (!brl_options)
++ selected_console = i;
++ strlcpy(c->name, name, sizeof(c->name));
++ c->options = options;
++ braille_set_options(c, brl_options);
++
++ c->index = idx;
++ return 0;
++}
++/*
++ * Set up a list of consoles. Called from init/main.c
++ */
++static int __init console_setup(char *str)
++{
++ char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
++ char *s, *options, *brl_options = NULL;
++ int idx;
++
++ if (_braille_console_setup(&str, &brl_options))
++ return 1;
++
++ /*
++ * Decode str into name, index, options.
++ */
++ if (str[0] >= '0' && str[0] <= '9') {
++ strcpy(buf, "ttyS");
++ strncpy(buf + 4, str, sizeof(buf) - 5);
++ } else {
++ strncpy(buf, str, sizeof(buf) - 1);
++ }
++ buf[sizeof(buf) - 1] = 0;
++ if ((options = strchr(str, ',')) != NULL)
++ *(options++) = 0;
++#ifdef __sparc__
++ if (!strcmp(str, "ttya"))
++ strcpy(buf, "ttyS0");
++ if (!strcmp(str, "ttyb"))
++ strcpy(buf, "ttyS1");
++#endif
++ for (s = buf; *s; s++)
++ if ((*s >= '0' && *s <= '9') || *s == ',')
++ break;
++ idx = simple_strtoul(s, NULL, 10);
++ *s = 0;
++
++ __add_preferred_console(buf, idx, options, brl_options);
++ console_set_on_cmdline = 1;
++ return 1;
++}
++__setup("console=", console_setup);
++
++/**
++ * add_preferred_console - add a device to the list of preferred consoles.
++ * @name: device name
++ * @idx: device index
++ * @options: options for this console
++ *
++ * The last preferred console added will be used for kernel messages
++ * and stdin/out/err for init. Normally this is used by console_setup
++ * above to handle user-supplied console arguments; however it can also
++ * be used by arch-specific code either to override the user or more
++ * commonly to provide a default console (ie from PROM variables) when
++ * the user has not supplied one.
++ */
++int add_preferred_console(char *name, int idx, char *options)
++{
++ return __add_preferred_console(name, idx, options, NULL);
++}
++
++int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
++{
++ struct console_cmdline *c;
++ int i;
++
++ for (i = 0, c = console_cmdline;
++ i < MAX_CMDLINECONSOLES && c->name[0];
++ i++, c++)
++ if (strcmp(c->name, name) == 0 && c->index == idx) {
++ strlcpy(c->name, name_new, sizeof(c->name));
++ c->name[sizeof(c->name) - 1] = 0;
++ c->options = options;
++ c->index = idx_new;
++ return i;
++ }
++ /* not found */
++ return -1;
++}
++
++bool console_suspend_enabled = 1;
++EXPORT_SYMBOL(console_suspend_enabled);
++
++static int __init console_suspend_disable(char *str)
++{
++ console_suspend_enabled = 0;
++ return 1;
++}
++__setup("no_console_suspend", console_suspend_disable);
++module_param_named(console_suspend, console_suspend_enabled,
++ bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
++ " and hibernate operations");
++
++/**
++ * suspend_console - suspend the console subsystem
++ *
++ * This disables printk() while we go into suspend states
++ */
++void suspend_console(void)
++{
++ if (!console_suspend_enabled)
++ return;
++ printk("Suspending console(s) (use no_console_suspend to debug)\n");
++ console_lock();
++ console_suspended = 1;
++ up(&console_sem);
++}
++
++void resume_console(void)
++{
++ if (!console_suspend_enabled)
++ return;
++ down(&console_sem);
++ console_suspended = 0;
++ console_unlock();
++}
++
++/**
++ * console_cpu_notify - print deferred console messages after CPU hotplug
++ * @self: notifier struct
++ * @action: CPU hotplug event
++ * @hcpu: unused
++ *
++ * If printk() is called from a CPU that is not online yet, the messages
++ * will be spooled but will not show up on the console. This function is
++ * called when a new CPU comes online (or fails to come up), and ensures
++ * that any such output gets printed.
++ */
++static int console_cpu_notify(struct notifier_block *self,
++ unsigned long action, void *hcpu)
++{
++ switch (action) {
++ case CPU_ONLINE:
++ case CPU_DEAD:
++ case CPU_DOWN_FAILED:
++ case CPU_UP_CANCELED:
++ console_lock();
++ console_unlock();
++ }
++ return NOTIFY_OK;
++}
++
++/**
++ * console_lock - lock the console system for exclusive use.
++ *
++ * Acquires a lock which guarantees that the caller has
++ * exclusive access to the console system and the console_drivers list.
++ *
++ * Can sleep, returns nothing.
++ */
++void console_lock(void)
++{
++ might_sleep();
++
++ down(&console_sem);
++ if (console_suspended)
++ return;
++ console_locked = 1;
++ console_may_schedule = 1;
++ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(console_lock);
++
++/**
++ * console_trylock - try to lock the console system for exclusive use.
++ *
++ * Tries to acquire a lock which guarantees that the caller has
++ * exclusive access to the console system and the console_drivers list.
++ *
++ * returns 1 on success, and 0 on failure to acquire the lock.
++ */
++int console_trylock(void)
++{
++ if (down_trylock(&console_sem))
++ return 0;
++ if (console_suspended) {
++ up(&console_sem);
++ return 0;
++ }
++ console_locked = 1;
++ console_may_schedule = 0;
++ mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
++ return 1;
++}
++EXPORT_SYMBOL(console_trylock);
++
++int is_console_locked(void)
++{
++ return console_locked;
++}
++
++static void console_cont_flush(char *text, size_t size)
++{
++ unsigned long flags;
++ size_t len;
++
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++
++ if (!cont.len)
++ goto out;
++
++ /*
++ * We still queue earlier records, likely because the console was
++ * busy. The earlier ones need to be printed before this one, we
++ * did not flush any fragment so far, so just let it queue up.
++ */
++ if (console_seq < log_next_seq && !cont.cons)
++ goto out;
++
++ len = cont_print_text(text, size);
++ raw_spin_unlock(&logbuf_lock);
++ stop_critical_timings();
++ call_console_drivers(cont.level, text, len);
++ start_critical_timings();
++ local_irq_restore(flags);
++ return;
++out:
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++}
++
++/**
++ * console_unlock - unlock the console system
++ *
++ * Releases the console_lock which the caller holds on the console system
++ * and the console driver list.
++ *
++ * While the console_lock was held, console output may have been buffered
++ * by printk(). If this is the case, console_unlock(); emits
++ * the output prior to releasing the lock.
++ *
++ * If there is output waiting, we wake /dev/kmsg and syslog() users.
++ *
++ * console_unlock(); may be called from any context.
++ */
++void console_unlock(void)
++{
++ static char text[LOG_LINE_MAX + PREFIX_MAX];
++ static u64 seen_seq;
++ unsigned long flags;
++ bool wake_klogd = false;
++ bool retry;
++
++ if (console_suspended) {
++ up(&console_sem);
++ return;
++ }
++
++ console_may_schedule = 0;
++
++ /* flush buffered message fragment immediately to console */
++ console_cont_flush(text, sizeof(text));
++again:
++ for (;;) {
++ struct printk_log *msg;
++ size_t len;
++ int level;
++
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++ if (seen_seq != log_next_seq) {
++ wake_klogd = true;
++ seen_seq = log_next_seq;
++ }
++
++ if (console_seq < log_first_seq) {
++ /* messages are gone, move to first one */
++ console_seq = log_first_seq;
++ console_idx = log_first_idx;
++ console_prev = 0;
++ }
++skip:
++ if (console_seq == log_next_seq)
++ break;
++
++ msg = log_from_idx(console_idx);
++ if (msg->flags & LOG_NOCONS) {
++ /*
++ * Skip record we have buffered and already printed
++ * directly to the console when we received it.
++ */
++ console_idx = log_next(console_idx);
++ console_seq++;
++ /*
++ * We will get here again when we register a new
++ * CON_PRINTBUFFER console. Clear the flag so we
++ * will properly dump everything later.
++ */
++ msg->flags &= ~LOG_NOCONS;
++ console_prev = msg->flags;
++ goto skip;
++ }
++
++ level = msg->level;
++ len = msg_print_text(msg, console_prev, false,
++ text, sizeof(text));
++ console_idx = log_next(console_idx);
++ console_seq++;
++ console_prev = msg->flags;
++ raw_spin_unlock(&logbuf_lock);
++
++ stop_critical_timings(); /* don't trace print latency */
++ call_console_drivers(level, text, len);
++ start_critical_timings();
++ local_irq_restore(flags);
++ }
++ console_locked = 0;
++ mutex_release(&console_lock_dep_map, 1, _RET_IP_);
++
++ /* Release the exclusive_console once it is used */
++ if (unlikely(exclusive_console))
++ exclusive_console = NULL;
++
++ raw_spin_unlock(&logbuf_lock);
++
++ up(&console_sem);
++
++ /*
++ * Someone could have filled up the buffer again, so re-check if there's
++ * something to flush. In case we cannot trylock the console_sem again,
++ * there's a new owner and the console_unlock() from them will do the
++ * flush, no worries.
++ */
++ raw_spin_lock(&logbuf_lock);
++ retry = console_seq != log_next_seq;
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++
++ if (retry && console_trylock())
++ goto again;
++
++ if (wake_klogd)
++ wake_up_klogd();
++}
++EXPORT_SYMBOL(console_unlock);
++
++/**
++ * console_conditional_schedule - yield the CPU if required
++ *
++ * If the console code is currently allowed to sleep, and
++ * if this CPU should yield the CPU to another task, do
++ * so here.
++ *
++ * Must be called within console_lock();.
++ */
++void __sched console_conditional_schedule(void)
++{
++ if (console_may_schedule)
++ cond_resched();
++}
++EXPORT_SYMBOL(console_conditional_schedule);
++
++void console_unblank(void)
++{
++ struct console *c;
++
++ /*
++ * console_unblank can no longer be called in interrupt context unless
++ * oops_in_progress is set to 1..
++ */
++ if (oops_in_progress) {
++ if (down_trylock(&console_sem) != 0)
++ return;
++ } else
++ console_lock();
++
++ console_locked = 1;
++ console_may_schedule = 0;
++ for_each_console(c)
++ if ((c->flags & CON_ENABLED) && c->unblank)
++ c->unblank();
++ console_unlock();
++}
++
++/*
++ * Return the console tty driver structure and its associated index
++ */
++struct tty_driver *console_device(int *index)
++{
++ struct console *c;
++ struct tty_driver *driver = NULL;
++
++ console_lock();
++ for_each_console(c) {
++ if (!c->device)
++ continue;
++ driver = c->device(c, index);
++ if (driver)
++ break;
++ }
++ console_unlock();
++ return driver;
++}
++
++/*
++ * Prevent further output on the passed console device so that (for example)
++ * serial drivers can disable console output before suspending a port, and can
++ * re-enable output afterwards.
++ */
++void console_stop(struct console *console)
++{
++ console_lock();
++ console->flags &= ~CON_ENABLED;
++ console_unlock();
++}
++EXPORT_SYMBOL(console_stop);
++
++void console_start(struct console *console)
++{
++ console_lock();
++ console->flags |= CON_ENABLED;
++ console_unlock();
++}
++EXPORT_SYMBOL(console_start);
++
++static int __read_mostly keep_bootcon;
++
++static int __init keep_bootcon_setup(char *str)
++{
++ keep_bootcon = 1;
++ pr_info("debug: skip boot console de-registration.\n");
++
++ return 0;
++}
++
++early_param("keep_bootcon", keep_bootcon_setup);
++
++/*
++ * The console driver calls this routine during kernel initialization
++ * to register the console printing procedure with printk() and to
++ * print any messages that were printed by the kernel before the
++ * console driver was initialized.
++ *
++ * This can happen pretty early during the boot process (because of
++ * early_printk) - sometimes before setup_arch() completes - be careful
++ * of what kernel features are used - they may not be initialised yet.
++ *
++ * There are two types of consoles - bootconsoles (early_printk) and
++ * "real" consoles (everything which is not a bootconsole) which are
++ * handled differently.
++ * - Any number of bootconsoles can be registered at any time.
++ * - As soon as a "real" console is registered, all bootconsoles
++ * will be unregistered automatically.
++ * - Once a "real" console is registered, any attempt to register a
++ * bootconsoles will be rejected
++ */
++void register_console(struct console *newcon)
++{
++ int i;
++ unsigned long flags;
++ struct console *bcon = NULL;
++ struct console_cmdline *c;
++
++ if (console_drivers)
++ for_each_console(bcon)
++ if (WARN(bcon == newcon,
++ "console '%s%d' already registered\n",
++ bcon->name, bcon->index))
++ return;
++
++ /*
++ * before we register a new CON_BOOT console, make sure we don't
++ * already have a valid console
++ */
++ if (console_drivers && newcon->flags & CON_BOOT) {
++ /* find the last or real console */
++ for_each_console(bcon) {
++ if (!(bcon->flags & CON_BOOT)) {
++ pr_info("Too late to register bootconsole %s%d\n",
++ newcon->name, newcon->index);
++ return;
++ }
++ }
++ }
++
++ if (console_drivers && console_drivers->flags & CON_BOOT)
++ bcon = console_drivers;
++
++ if (preferred_console < 0 || bcon || !console_drivers)
++ preferred_console = selected_console;
++
++ if (newcon->early_setup)
++ newcon->early_setup();
++
++ /*
++ * See if we want to use this console driver. If we
++ * didn't select a console we take the first one
++ * that registers here.
++ */
++ if (preferred_console < 0) {
++ if (newcon->index < 0)
++ newcon->index = 0;
++ if (newcon->setup == NULL ||
++ newcon->setup(newcon, NULL) == 0) {
++ newcon->flags |= CON_ENABLED;
++ if (newcon->device) {
++ newcon->flags |= CON_CONSDEV;
++ preferred_console = 0;
++ }
++ }
++ }
++
++ /*
++ * See if this console matches one we selected on
++ * the command line.
++ */
++ for (i = 0, c = console_cmdline;
++ i < MAX_CMDLINECONSOLES && c->name[0];
++ i++, c++) {
++ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
++ if (strcmp(c->name, newcon->name) != 0)
++ continue;
++ if (newcon->index >= 0 &&
++ newcon->index != c->index)
++ continue;
++ if (newcon->index < 0)
++ newcon->index = c->index;
++
++ if (_braille_register_console(newcon, c))
++ return;
++
++ if (newcon->setup &&
++ newcon->setup(newcon, console_cmdline[i].options) != 0)
++ break;
++ newcon->flags |= CON_ENABLED;
++ newcon->index = c->index;
++ if (i == selected_console) {
++ newcon->flags |= CON_CONSDEV;
++ preferred_console = selected_console;
++ }
++ break;
++ }
++
++ if (!(newcon->flags & CON_ENABLED))
++ return;
++
++ /*
++ * If we have a bootconsole, and are switching to a real console,
++ * don't print everything out again, since when the boot console, and
++ * the real console are the same physical device, it's annoying to
++ * see the beginning boot messages twice
++ */
++ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
++ newcon->flags &= ~CON_PRINTBUFFER;
++
++ /*
++ * Put this console in the list - keep the
++ * preferred driver at the head of the list.
++ */
++ console_lock();
++ if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
++ newcon->next = console_drivers;
++ console_drivers = newcon;
++ if (newcon->next)
++ newcon->next->flags &= ~CON_CONSDEV;
++ } else {
++ newcon->next = console_drivers->next;
++ console_drivers->next = newcon;
++ }
++ if (newcon->flags & CON_PRINTBUFFER) {
++ /*
++ * console_unlock(); will print out the buffered messages
++ * for us.
++ */
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++ console_seq = syslog_seq;
++ console_idx = syslog_idx;
++ console_prev = syslog_prev;
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ /*
++ * We're about to replay the log buffer. Only do this to the
++ * just-registered console to avoid excessive message spam to
++ * the already-registered consoles.
++ */
++ exclusive_console = newcon;
++ }
++ console_unlock();
++ console_sysfs_notify();
++
++ /*
++ * By unregistering the bootconsoles after we enable the real console
++ * we get the "console xxx enabled" message on all the consoles -
++ * boot consoles, real consoles, etc - this is to ensure that end
++ * users know there might be something in the kernel's log buffer that
++ * went to the bootconsole (that they do not see on the real console)
++ */
++ pr_info("%sconsole [%s%d] enabled\n",
++ (newcon->flags & CON_BOOT) ? "boot" : "" ,
++ newcon->name, newcon->index);
++ if (bcon &&
++ ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
++ !keep_bootcon) {
++ /* We need to iterate through all boot consoles, to make
++ * sure we print everything out, before we unregister them.
++ */
++ for_each_console(bcon)
++ if (bcon->flags & CON_BOOT)
++ unregister_console(bcon);
++ }
++}
++EXPORT_SYMBOL(register_console);
++
++int unregister_console(struct console *console)
++{
++ struct console *a, *b;
++ int res;
++
++ pr_info("%sconsole [%s%d] disabled\n",
++ (console->flags & CON_BOOT) ? "boot" : "" ,
++ console->name, console->index);
++
++ res = _braille_unregister_console(console);
++ if (res)
++ return res;
++
++ res = 1;
++ console_lock();
++ if (console_drivers == console) {
++ console_drivers=console->next;
++ res = 0;
++ } else if (console_drivers) {
++ for (a=console_drivers->next, b=console_drivers ;
++ a; b=a, a=b->next) {
++ if (a == console) {
++ b->next = a->next;
++ res = 0;
++ break;
++ }
++ }
++ }
++
++ /*
++ * If this isn't the last console and it has CON_CONSDEV set, we
++ * need to set it on the next preferred console.
++ */
++ if (console_drivers != NULL && console->flags & CON_CONSDEV)
++ console_drivers->flags |= CON_CONSDEV;
++
++ console_unlock();
++ console_sysfs_notify();
++ return res;
++}
++EXPORT_SYMBOL(unregister_console);
++
++static int __init printk_late_init(void)
++{
++ struct console *con;
++
++ for_each_console(con) {
++ if (!keep_bootcon && con->flags & CON_BOOT) {
++ unregister_console(con);
++ }
++ }
++ hotcpu_notifier(console_cpu_notify, 0);
++ return 0;
++}
++late_initcall(printk_late_init);
++
++#if defined CONFIG_PRINTK
++/*
++ * Delayed printk version, for scheduler-internal messages:
++ */
++#define PRINTK_BUF_SIZE 512
++
++#define PRINTK_PENDING_WAKEUP 0x01
++#define PRINTK_PENDING_SCHED 0x02
++
++static DEFINE_PER_CPU(int, printk_pending);
++static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
++
++static void wake_up_klogd_work_func(struct irq_work *irq_work)
++{
++ int pending = __this_cpu_xchg(printk_pending, 0);
++
++ if (pending & PRINTK_PENDING_SCHED) {
++ char *buf = __get_cpu_var(printk_sched_buf);
++ pr_warn("[sched_delayed] %s", buf);
++ }
++
++ if (pending & PRINTK_PENDING_WAKEUP)
++ wake_up_interruptible(&log_wait);
++}
++
++static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
++ .func = wake_up_klogd_work_func,
++ .flags = IRQ_WORK_LAZY,
++};
++
++void wake_up_klogd(void)
++{
++ preempt_disable();
++ if (waitqueue_active(&log_wait)) {
++ this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
++ irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
++ }
++ preempt_enable();
++}
++
++int printk_deferred(const char *fmt, ...)
++{
++ unsigned long flags;
++ va_list args;
++ char *buf;
++ int r;
++
++ local_irq_save(flags);
++ buf = __get_cpu_var(printk_sched_buf);
++
++ va_start(args, fmt);
++ r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
++ va_end(args);
++
++ __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
++ irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
++ local_irq_restore(flags);
++
++ return r;
++}
++
++/*
++ * printk rate limiting, lifted from the networking subsystem.
++ *
++ * This enforces a rate limit: not more than 10 kernel messages
++ * every 5s to make a denial-of-service attack impossible.
++ */
++DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
++
++int __printk_ratelimit(const char *func)
++{
++ return ___ratelimit(&printk_ratelimit_state, func);
++}
++EXPORT_SYMBOL(__printk_ratelimit);
++
++/**
++ * printk_timed_ratelimit - caller-controlled printk ratelimiting
++ * @caller_jiffies: pointer to caller's state
++ * @interval_msecs: minimum interval between prints
++ *
++ * printk_timed_ratelimit() returns true if more than @interval_msecs
++ * milliseconds have elapsed since the last time printk_timed_ratelimit()
++ * returned true.
++ */
++bool printk_timed_ratelimit(unsigned long *caller_jiffies,
++ unsigned int interval_msecs)
++{
++ if (*caller_jiffies == 0
++ || !time_in_range(jiffies, *caller_jiffies,
++ *caller_jiffies
++ + msecs_to_jiffies(interval_msecs))) {
++ *caller_jiffies = jiffies;
++ return true;
++ }
++ return false;
++}
++EXPORT_SYMBOL(printk_timed_ratelimit);
++
++static DEFINE_SPINLOCK(dump_list_lock);
++static LIST_HEAD(dump_list);
++
++/**
++ * kmsg_dump_register - register a kernel log dumper.
++ * @dumper: pointer to the kmsg_dumper structure
++ *
++ * Adds a kernel log dumper to the system. The dump callback in the
++ * structure will be called when the kernel oopses or panics and must be
++ * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
++ */
++int kmsg_dump_register(struct kmsg_dumper *dumper)
++{
++ unsigned long flags;
++ int err = -EBUSY;
++
++ /* The dump callback needs to be set */
++ if (!dumper->dump)
++ return -EINVAL;
++
++ spin_lock_irqsave(&dump_list_lock, flags);
++ /* Don't allow registering multiple times */
++ if (!dumper->registered) {
++ dumper->registered = 1;
++ list_add_tail_rcu(&dumper->list, &dump_list);
++ err = 0;
++ }
++ spin_unlock_irqrestore(&dump_list_lock, flags);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(kmsg_dump_register);
++
++/**
++ * kmsg_dump_unregister - unregister a kmsg dumper.
++ * @dumper: pointer to the kmsg_dumper structure
++ *
++ * Removes a dump device from the system. Returns zero on success and
++ * %-EINVAL otherwise.
++ */
++int kmsg_dump_unregister(struct kmsg_dumper *dumper)
++{
++ unsigned long flags;
++ int err = -EINVAL;
++
++ spin_lock_irqsave(&dump_list_lock, flags);
++ if (dumper->registered) {
++ dumper->registered = 0;
++ list_del_rcu(&dumper->list);
++ err = 0;
++ }
++ spin_unlock_irqrestore(&dump_list_lock, flags);
++ synchronize_rcu();
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
++
++static bool always_kmsg_dump;
++module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
++
++/**
++ * kmsg_dump - dump kernel log to kernel message dumpers.
++ * @reason: the reason (oops, panic etc) for dumping
++ *
++ * Call each of the registered dumper's dump() callback, which can
++ * retrieve the kmsg records with kmsg_dump_get_line() or
++ * kmsg_dump_get_buffer().
++ */
++void kmsg_dump(enum kmsg_dump_reason reason)
++{
++ struct kmsg_dumper *dumper;
++ unsigned long flags;
++
++ if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
++ return;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(dumper, &dump_list, list) {
++ if (dumper->max_reason && reason > dumper->max_reason)
++ continue;
++
++ /* initialize iterator with data about the stored records */
++ dumper->active = true;
++
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++ dumper->cur_seq = clear_seq;
++ dumper->cur_idx = clear_idx;
++ dumper->next_seq = log_next_seq;
++ dumper->next_idx = log_next_idx;
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++
++ /* invoke dumper which will iterate over records */
++ dumper->dump(dumper, reason);
++
++ /* reset iterator */
++ dumper->active = false;
++ }
++ rcu_read_unlock();
++}
++
++/**
++ * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
++ * @dumper: registered kmsg dumper
++ * @syslog: include the "<4>" prefixes
++ * @line: buffer to copy the line to
++ * @size: maximum size of the buffer
++ * @len: length of line placed into buffer
++ *
++ * Start at the beginning of the kmsg buffer, with the oldest kmsg
++ * record, and copy one record into the provided buffer.
++ *
++ * Consecutive calls will return the next available record moving
++ * towards the end of the buffer with the youngest messages.
++ *
++ * A return value of FALSE indicates that there are no more records to
++ * read.
++ *
++ * The function is similar to kmsg_dump_get_line(), but grabs no locks.
++ */
++bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
++ char *line, size_t size, size_t *len)
++{
++ struct printk_log *msg;
++ size_t l = 0;
++ bool ret = false;
++
++ if (!dumper->active)
++ goto out;
++
++ if (dumper->cur_seq < log_first_seq) {
++ /* messages are gone, move to first available one */
++ dumper->cur_seq = log_first_seq;
++ dumper->cur_idx = log_first_idx;
++ }
++
++ /* last entry */
++ if (dumper->cur_seq >= log_next_seq)
++ goto out;
++
++ msg = log_from_idx(dumper->cur_idx);
++ l = msg_print_text(msg, 0, syslog, line, size);
++
++ dumper->cur_idx = log_next(dumper->cur_idx);
++ dumper->cur_seq++;
++ ret = true;
++out:
++ if (len)
++ *len = l;
++ return ret;
++}
++
++/**
++ * kmsg_dump_get_line - retrieve one kmsg log line
++ * @dumper: registered kmsg dumper
++ * @syslog: include the "<4>" prefixes
++ * @line: buffer to copy the line to
++ * @size: maximum size of the buffer
++ * @len: length of line placed into buffer
++ *
++ * Start at the beginning of the kmsg buffer, with the oldest kmsg
++ * record, and copy one record into the provided buffer.
++ *
++ * Consecutive calls will return the next available record moving
++ * towards the end of the buffer with the youngest messages.
++ *
++ * A return value of FALSE indicates that there are no more records to
++ * read.
++ */
++bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
++ char *line, size_t size, size_t *len)
++{
++ unsigned long flags;
++ bool ret;
++
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++ ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
++
++/**
++ * kmsg_dump_get_buffer - copy kmsg log lines
++ * @dumper: registered kmsg dumper
++ * @syslog: include the "<4>" prefixes
++ * @buf: buffer to copy the line to
++ * @size: maximum size of the buffer
++ * @len: length of line placed into buffer
++ *
++ * Start at the end of the kmsg buffer and fill the provided buffer
++ * with as many of the *youngest* kmsg records that fit into it.
++ * If the buffer is large enough, all available kmsg records will be
++ * copied with a single call.
++ *
++ * Consecutive calls will fill the buffer with the next block of
++ * available older records, not including the earlier retrieved ones.
++ *
++ * A return value of FALSE indicates that there are no more records to
++ * read.
++ */
++bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
++ char *buf, size_t size, size_t *len)
++{
++ unsigned long flags;
++ u64 seq;
++ u32 idx;
++ u64 next_seq;
++ u32 next_idx;
++ enum log_flags prev;
++ size_t l = 0;
++ bool ret = false;
++
++ if (!dumper->active)
++ goto out;
++
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++ if (dumper->cur_seq < log_first_seq) {
++ /* messages are gone, move to first available one */
++ dumper->cur_seq = log_first_seq;
++ dumper->cur_idx = log_first_idx;
++ }
++
++ /* last entry */
++ if (dumper->cur_seq >= dumper->next_seq) {
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ goto out;
++ }
++
++ /* calculate length of entire buffer */
++ seq = dumper->cur_seq;
++ idx = dumper->cur_idx;
++ prev = 0;
++ while (seq < dumper->next_seq) {
++ struct printk_log *msg = log_from_idx(idx);
++
++ l += msg_print_text(msg, prev, true, NULL, 0);
++ idx = log_next(idx);
++ seq++;
++ prev = msg->flags;
++ }
++
++ /* move first record forward until length fits into the buffer */
++ seq = dumper->cur_seq;
++ idx = dumper->cur_idx;
++ prev = 0;
++ while (l > size && seq < dumper->next_seq) {
++ struct printk_log *msg = log_from_idx(idx);
++
++ l -= msg_print_text(msg, prev, true, NULL, 0);
++ idx = log_next(idx);
++ seq++;
++ prev = msg->flags;
++ }
++
++ /* last message in next iteration */
++ next_seq = seq;
++ next_idx = idx;
++
++ l = 0;
++ while (seq < dumper->next_seq) {
++ struct printk_log *msg = log_from_idx(idx);
++
++ l += msg_print_text(msg, prev, syslog, buf + l, size - l);
++ idx = log_next(idx);
++ seq++;
++ prev = msg->flags;
++ }
++
++ dumper->next_seq = next_seq;
++ dumper->next_idx = next_idx;
++ ret = true;
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++out:
++ if (len)
++ *len = l;
++ return ret;
++}
++EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
++
++/**
++ * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
++ * @dumper: registered kmsg dumper
++ *
++ * Reset the dumper's iterator so that kmsg_dump_get_line() and
++ * kmsg_dump_get_buffer() can be called again and used multiple
++ * times within the same dumper.dump() callback.
++ *
++ * The function is similar to kmsg_dump_rewind(), but grabs no locks.
++ */
++void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
++{
++ dumper->cur_seq = clear_seq;
++ dumper->cur_idx = clear_idx;
++ dumper->next_seq = log_next_seq;
++ dumper->next_idx = log_next_idx;
++}
++
++/**
++ * kmsg_dump_rewind - reset the iterator
++ * @dumper: registered kmsg dumper
++ *
++ * Reset the dumper's iterator so that kmsg_dump_get_line() and
++ * kmsg_dump_get_buffer() can be called again and used multiple
++ * times within the same dumper.dump() callback.
++ */
++void kmsg_dump_rewind(struct kmsg_dumper *dumper)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
++ kmsg_dump_rewind_nolock(dumper);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++}
++EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
++
++static char dump_stack_arch_desc_str[128];
++
++/**
++ * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
++ * @fmt: printf-style format string
++ * @...: arguments for the format string
++ *
++ * The configured string will be printed right after utsname during task
++ * dumps. Usually used to add arch-specific system identifiers. If an
++ * arch wants to make use of such an ID string, it should initialize this
++ * as soon as possible during boot.
++ */
++void __init dump_stack_set_arch_desc(const char *fmt, ...)
++{
++ va_list args;
++
++ va_start(args, fmt);
++ vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
++ fmt, args);
++ va_end(args);
++}
++
++/**
++ * dump_stack_print_info - print generic debug info for dump_stack()
++ * @log_lvl: log level
++ *
++ * Arch-specific dump_stack() implementations can use this function to
++ * print out the same debug information as the generic dump_stack().
++ */
++void dump_stack_print_info(const char *log_lvl)
++{
++ printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n",
++ log_lvl, raw_smp_processor_id(), current->pid, current->comm,
++ print_tainted(), init_utsname()->release,
++ (int)strcspn(init_utsname()->version, " "),
++ init_utsname()->version);
++
++ if (dump_stack_arch_desc_str[0] != '\0')
++ printk("%sHardware name: %s\n",
++ log_lvl, dump_stack_arch_desc_str);
++
++ print_worker_info(log_lvl, current);
++}
++
++/**
++ * show_regs_print_info - print generic debug info for show_regs()
++ * @log_lvl: log level
++ *
++ * show_regs() implementations can use this function to print out generic
++ * debug information.
++ */
++void show_regs_print_info(const char *log_lvl)
++{
++ dump_stack_print_info(log_lvl);
++
++ printk("%stask: %p ti: %p task.ti: %p\n",
++ log_lvl, current, current_thread_info(),
++ task_thread_info(current));
++}
++
++#endif
diff --git a/kernel/profile.c b/kernel/profile.c
index 76b8e77..a2930e8 100644
--- a/kernel/profile.c
@@ -94920,7 +98151,7 @@ index 9d557df..7207dae 100644
error = -EINVAL;
break;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
-index ea7ec7f..798623e 100644
+index ea7ec7f..da588ba 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -86,6 +86,13 @@
@@ -94937,7 +98168,11 @@ index ea7ec7f..798623e 100644
/* External variables not in a header file. */
extern int sysctl_overcommit_memory;
-@@ -112,18 +119,18 @@ extern int blk_iopoll_enabled;
+@@ -109,21 +116,22 @@ extern int sysctl_nr_trim_pages;
+ #ifdef CONFIG_BLOCK
+ extern int blk_iopoll_enabled;
+ #endif
++extern int sysctl_modify_ldt;
/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
@@ -94965,12 +98200,13 @@ index ea7ec7f..798623e 100644
#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
-@@ -165,10 +172,13 @@ static int proc_taint(struct ctl_table *table, int write,
+@@ -165,10 +173,13 @@ static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
-#ifdef CONFIG_PRINTK
- static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-#endif
+
@@ -94981,7 +98217,7 @@ index ea7ec7f..798623e 100644
#ifdef CONFIG_MAGIC_SYSRQ
/* Note: sysrq code uses it's own private copy */
-@@ -191,6 +201,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
+@@ -191,6 +202,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
}
#endif
@@ -94989,7 +98225,7 @@ index ea7ec7f..798623e 100644
static struct ctl_table root_table[];
static struct ctl_table_root sysctl_table_root;
-@@ -220,6 +231,20 @@ extern struct ctl_table epoll_table[];
+@@ -220,6 +232,20 @@ extern struct ctl_table epoll_table[];
int sysctl_legacy_va_layout;
#endif
@@ -95010,7 +98246,7 @@ index ea7ec7f..798623e 100644
/* The default sysctl tables: */
static struct ctl_table root_table[] = {
-@@ -266,6 +291,22 @@ static int max_extfrag_threshold = 1000;
+@@ -266,6 +292,22 @@ static int max_extfrag_threshold = 1000;
#endif
static struct ctl_table kern_table[] = {
@@ -95033,7 +98269,7 @@ index ea7ec7f..798623e 100644
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
-@@ -420,7 +461,7 @@ static struct ctl_table kern_table[] = {
+@@ -420,7 +462,7 @@ static struct ctl_table kern_table[] = {
.data = core_pattern,
.maxlen = CORENAME_MAX_SIZE,
.mode = 0644,
@@ -95042,7 +98278,7 @@ index ea7ec7f..798623e 100644
},
{
.procname = "core_pipe_limit",
-@@ -550,7 +591,7 @@ static struct ctl_table kern_table[] = {
+@@ -550,7 +592,7 @@ static struct ctl_table kern_table[] = {
.data = &modprobe_path,
.maxlen = KMOD_PATH_LEN,
.mode = 0644,
@@ -95051,7 +98287,21 @@ index ea7ec7f..798623e 100644
},
{
.procname = "modules_disabled",
-@@ -717,16 +758,20 @@ static struct ctl_table kern_table[] = {
+@@ -558,7 +600,7 @@ static struct ctl_table kern_table[] = {
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ /* only handle a transition from default "0" to "1" */
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_dointvec_minmax_secure,
+ .extra1 = &one,
+ .extra2 = &one,
+ },
+@@ -713,20 +755,24 @@ static struct ctl_table kern_table[] = {
+ .data = &dmesg_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax_sysadmin,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
.extra1 = &zero,
.extra2 = &one,
},
@@ -95061,7 +98311,8 @@ index ea7ec7f..798623e 100644
.data = &kptr_restrict,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax_sysadmin,
+- .proc_handler = proc_dointvec_minmax_sysadmin,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
+#ifdef CONFIG_GRKERNSEC_HIDESYM
+ .extra1 = &two,
+#else
@@ -95073,7 +98324,23 @@ index ea7ec7f..798623e 100644
{
.procname = "ngroups_max",
.data = &ngroups_max,
-@@ -957,10 +1002,17 @@ static struct ctl_table kern_table[] = {
+@@ -831,6 +877,15 @@ static struct ctl_table kern_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
++ {
++ .procname = "modify_ldt",
++ .data = &sysctl_modify_ldt,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
++ .extra1 = &zero,
++ .extra2 = &one,
++ },
+ #endif
+ #if defined(CONFIG_MMU)
+ {
+@@ -957,10 +1012,17 @@ static struct ctl_table kern_table[] = {
*/
{
.procname = "perf_event_paranoid",
@@ -95084,7 +98351,7 @@ index ea7ec7f..798623e 100644
.mode = 0644,
- .proc_handler = proc_dointvec,
+ /* go ahead, be a hero */
-+ .proc_handler = proc_dointvec_minmax_sysadmin,
++ .proc_handler = proc_dointvec_minmax_secure_sysadmin,
+ .extra1 = &neg_one,
+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
+ .extra2 = &three,
@@ -95094,7 +98361,7 @@ index ea7ec7f..798623e 100644
},
{
.procname = "perf_event_mlock_kb",
-@@ -1216,6 +1268,13 @@ static struct ctl_table vm_table[] = {
+@@ -1216,6 +1278,13 @@ static struct ctl_table vm_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
@@ -95108,7 +98375,7 @@ index ea7ec7f..798623e 100644
#else
{
.procname = "nr_trim_pages",
-@@ -1499,7 +1558,7 @@ static struct ctl_table fs_table[] = {
+@@ -1499,7 +1568,7 @@ static struct ctl_table fs_table[] = {
.data = &suid_dumpable,
.maxlen = sizeof(int),
.mode = 0644,
@@ -95117,7 +98384,7 @@ index ea7ec7f..798623e 100644
.extra1 = &zero,
.extra2 = &two,
},
-@@ -1720,6 +1779,17 @@ static int test_perm(int mode, int op)
+@@ -1720,6 +1789,17 @@ static int test_perm(int mode, int op)
int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
{
int mode;
@@ -95135,7 +98402,7 @@ index ea7ec7f..798623e 100644
if (root->permissions)
mode = root->permissions(root, current->nsproxy, table);
-@@ -1732,7 +1802,9 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
+@@ -1732,7 +1812,9 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
{
for (; table->procname; table++) {
@@ -95146,7 +98413,7 @@ index ea7ec7f..798623e 100644
if (table->child)
sysctl_set_parent(table, table->child);
}
-@@ -1856,7 +1928,8 @@ struct ctl_table_header *__register_sysctl_paths(
+@@ -1856,7 +1938,8 @@ struct ctl_table_header *__register_sysctl_paths(
const struct ctl_path *path, struct ctl_table *table)
{
struct ctl_table_header *header;
@@ -95156,7 +98423,7 @@ index ea7ec7f..798623e 100644
unsigned int n, npath;
struct ctl_table_set *set;
-@@ -1877,7 +1950,7 @@ struct ctl_table_header *__register_sysctl_paths(
+@@ -1877,7 +1960,7 @@ struct ctl_table_header *__register_sysctl_paths(
if (!header)
return NULL;
@@ -95165,7 +98432,7 @@ index ea7ec7f..798623e 100644
/* Now connect the dots */
prevp = &header->ctl_table;
-@@ -2124,6 +2197,16 @@ int proc_dostring(struct ctl_table *table, int write,
+@@ -2124,6 +2207,16 @@ int proc_dostring(struct ctl_table *table, int write,
buffer, lenp, ppos);
}
@@ -95182,7 +98449,7 @@ index ea7ec7f..798623e 100644
static size_t proc_skip_spaces(char **buf)
{
size_t ret;
-@@ -2229,6 +2312,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
+@@ -2229,6 +2322,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
len = strlen(tmp);
if (len > *size)
len = *size;
@@ -95191,7 +98458,52 @@ index ea7ec7f..798623e 100644
if (copy_to_user(*buf, tmp, len))
return -EFAULT;
*size -= len;
-@@ -2393,7 +2478,7 @@ int proc_dointvec(struct ctl_table *table, int write,
+@@ -2386,6 +2481,44 @@ int proc_dointvec(struct ctl_table *table, int write,
+ NULL,NULL);
+ }
+
++static int do_proc_dointvec_conv_secure(bool *negp, unsigned long *lvalp,
++ int *valp,
++ int write, void *data)
++{
++ if (write) {
++ if (*negp) {
++ if (*lvalp > (unsigned long) INT_MAX + 1)
++ return -EINVAL;
++ pax_open_kernel();
++ *valp = -*lvalp;
++ pax_close_kernel();
++ } else {
++ if (*lvalp > (unsigned long) INT_MAX)
++ return -EINVAL;
++ pax_open_kernel();
++ *valp = *lvalp;
++ pax_close_kernel();
++ }
++ } else {
++ int val = *valp;
++ if (val < 0) {
++ *negp = true;
++ *lvalp = (unsigned long)-val;
++ } else {
++ *negp = false;
++ *lvalp = (unsigned long)val;
++ }
++ }
++ return 0;
++}
++
++int proc_dointvec_secure(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ return do_proc_dointvec(table,write,buffer,lenp,ppos,
++ do_proc_dointvec_conv_secure,NULL);
++}
++
+ /*
+ * Taint values can only be increased
+ * This means we can safely use a temporary.
+@@ -2393,7 +2526,7 @@ int proc_dointvec(struct ctl_table *table, int write,
static int proc_taint(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -95200,26 +98512,73 @@ index ea7ec7f..798623e 100644
unsigned long tmptaint = get_taint();
int err;
-@@ -2421,7 +2506,6 @@ static int proc_taint(struct ctl_table *table, int write,
+@@ -2421,16 +2554,14 @@ static int proc_taint(struct ctl_table *table, int write,
return err;
}
-#ifdef CONFIG_PRINTK
- static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_secure_sysadmin(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
-@@ -2430,7 +2514,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
- return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+- return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++ return proc_dointvec_minmax_secure(table, write, buffer, lenp, ppos);
}
-#endif
struct do_proc_dointvec_minmax_conv_param {
int *min;
-@@ -2488,6 +2571,34 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
+@@ -2461,6 +2592,32 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
+ return 0;
+ }
+
++static int do_proc_dointvec_minmax_conv_secure(bool *negp, unsigned long *lvalp,
++ int *valp,
++ int write, void *data)
++{
++ struct do_proc_dointvec_minmax_conv_param *param = data;
++ if (write) {
++ int val = *negp ? -*lvalp : *lvalp;
++ if ((param->min && *param->min > val) ||
++ (param->max && *param->max < val))
++ return -EINVAL;
++ pax_open_kernel();
++ *valp = val;
++ pax_close_kernel();
++ } else {
++ int val = *valp;
++ if (val < 0) {
++ *negp = true;
++ *lvalp = (unsigned long)-val;
++ } else {
++ *negp = false;
++ *lvalp = (unsigned long)val;
++ }
++ }
++ return 0;
++}
++
+ /**
+ * proc_dointvec_minmax - read a vector of integers with min/max values
+ * @table: the sysctl table
+@@ -2488,6 +2645,45 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
do_proc_dointvec_minmax_conv, &param);
}
++int proc_dointvec_minmax_secure(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct do_proc_dointvec_minmax_conv_param param = {
++ .min = (int *) table->extra1,
++ .max = (int *) table->extra2,
++ };
++ return do_proc_dointvec(table, write, buffer, lenp, ppos,
++ do_proc_dointvec_minmax_conv_secure, &param);
++}
++
+static void validate_coredump_safety(void)
+{
+ if (suid_dumpable == SUID_DUMPABLE_SAFE &&
@@ -95251,7 +98610,7 @@ index ea7ec7f..798623e 100644
static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos,
-@@ -2545,8 +2656,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
+@@ -2545,8 +2741,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
*i = val;
} else {
val = convdiv * (*i) / convmul;
@@ -95264,7 +98623,7 @@ index ea7ec7f..798623e 100644
err = proc_put_long(&buffer, &left, val, false);
if (err)
break;
-@@ -2941,6 +3055,12 @@ int proc_dostring(struct ctl_table *table, int write,
+@@ -2941,6 +3140,12 @@ int proc_dostring(struct ctl_table *table, int write,
return -ENOSYS;
}
@@ -95277,7 +98636,7 @@ index ea7ec7f..798623e 100644
int proc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
-@@ -2997,6 +3117,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
+@@ -2997,6 +3202,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
EXPORT_SYMBOL(proc_dostring);
@@ -96783,10 +100142,22 @@ index bd2bea9..6b3c95e 100644
return false;
diff --git a/lib/kobject.c b/lib/kobject.c
-index 83bd5b3..8a0c75f 100644
+index 83bd5b3..757af67 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
-@@ -844,7 +844,7 @@ static struct kset *kset_create(const char *name,
+@@ -296,8 +296,9 @@ error:
+ }
+ EXPORT_SYMBOL(kobject_init);
+
+-static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
+- const char *fmt, va_list vargs)
++static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
++ struct kobject *parent,
++ const char *fmt, va_list vargs)
+ {
+ int retval;
+
+@@ -844,7 +845,7 @@ static struct kset *kset_create(const char *name,
kset = kzalloc(sizeof(*kset), GFP_KERNEL);
if (!kset)
return NULL;
@@ -96795,7 +100166,7 @@ index 83bd5b3..8a0c75f 100644
if (retval) {
kfree(kset);
return NULL;
-@@ -898,9 +898,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
+@@ -898,9 +899,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
static DEFINE_SPINLOCK(kobj_ns_type_lock);
@@ -97566,7 +100937,7 @@ index 1f44bdc..009bfe8 100644
+}
+#endif
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
-index ae02e42..4ffc938 100644
+index ae02e42..cd72015 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -16,6 +16,9 @@
@@ -97592,10 +100963,11 @@ index ae02e42..4ffc938 100644
return string(buf, end, uuid, spec);
}
+-int kptr_restrict __read_mostly;
+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+int kptr_restrict __read_mostly = 2;
++int kptr_restrict __read_only = 2;
+#else
- int kptr_restrict __read_mostly;
++int kptr_restrict __read_only;
+#endif
/*
@@ -103475,6 +106847,18 @@ index 23f45ce..c748f1a 100644
__AAL_STAT_ITEMS
#undef __HANDLE_ITEM
}
+diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
+index c6715ee..69745c0 100644
+--- a/net/ax25/ax25_subr.c
++++ b/net/ax25/ax25_subr.c
+@@ -265,6 +265,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
+ {
+ ax25_clear_queues(ax25);
+
++ ax25_stop_heartbeat(ax25);
+ ax25_stop_t1timer(ax25);
+ ax25_stop_t2timer(ax25);
+ ax25_stop_t3timer(ax25);
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index ebe0ef3..d5b0a8e 100644
--- a/net/ax25/sysctl_net_ax25.c
@@ -104847,7 +108231,7 @@ index 80aeac9..b08d0a8 100644
return -ENODEV;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 5b412f0..e251eea 100644
+index 5b412f0..595dfcd 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -57,7 +57,7 @@ struct rtnl_link {
@@ -104885,6 +108269,24 @@ index 5b412f0..e251eea 100644
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+@@ -1484,10 +1487,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ goto errout;
+
+ nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
+- if (nla_type(attr) != IFLA_VF_PORT)
+- continue;
+- err = nla_parse_nested(port, IFLA_PORT_MAX,
+- attr, ifla_port_policy);
++ if (nla_type(attr) != IFLA_VF_PORT ||
++ nla_len(attr) < NLA_HDRLEN) {
++ err = -EINVAL;
++ goto errout;
++ }
++ err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
++ ifla_port_policy);
+ if (err < 0)
+ goto errout;
+ if (!port[IFLA_PORT_VF]) {
diff --git a/net/core/scm.c b/net/core/scm.c
index ff52ad0..aff1c0f 100644
--- a/net/core/scm.c
@@ -116088,10 +119490,10 @@ index 0000000..0c96d8a
+}
diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
new file mode 100644
-index 0000000..da184c5
+index 0000000..c5de280
--- /dev/null
+++ b/tools/gcc/constify_plugin.c
-@@ -0,0 +1,564 @@
+@@ -0,0 +1,568 @@
+/*
+ * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
+ * Copyright 2011-2015 by PaX Team <pageexec@freemail.hu>
@@ -116533,7 +119935,7 @@ index 0000000..da184c5
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 5000
-+#elif BUILDING_GCC_VERSION >= 4009
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -116558,7 +119960,11 @@ index 0000000..da184c5
+class check_local_variables_pass : public gimple_opt_pass {
+public:
+ check_local_variables_pass() : gimple_opt_pass(check_local_variables_pass_data, g) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return check_local_variables(); }
++#else
+ unsigned int execute() { return check_local_variables(); }
++#endif
+};
+}
+
@@ -116658,10 +120064,10 @@ index 0000000..da184c5
+}
diff --git a/tools/gcc/gcc-common.h b/tools/gcc/gcc-common.h
new file mode 100644
-index 0000000..1d20e32
+index 0000000..70924d4
--- /dev/null
+++ b/tools/gcc/gcc-common.h
-@@ -0,0 +1,689 @@
+@@ -0,0 +1,787 @@
+#ifndef GCC_COMMON_H_INCLUDED
+#define GCC_COMMON_H_INCLUDED
+
@@ -116740,6 +120146,8 @@ index 0000000..1d20e32
+#include "tree-flow.h"
+#else
+#include "tree-cfgcleanup.h"
++#include "tree-ssa-operands.h"
++#include "tree-into-ssa.h"
+#endif
+
+#if BUILDING_GCC_VERSION >= 4008
@@ -117070,6 +120478,76 @@ index 0000000..1d20e32
+typedef union gimple_statement_d gdebug;
+typedef union gimple_statement_d gphi;
+typedef union gimple_statement_d greturn;
++
++static inline gasm *as_a_gasm(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gasm *as_a_const_gasm(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gassign *as_a_gassign(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gassign *as_a_const_gassign(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gcall *as_a_gcall(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gcall *as_a_const_gcall(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gcond *as_a_gcond(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gcond *as_a_const_gcond(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gdebug *as_a_gdebug(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gphi *as_a_gphi(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const gphi *as_a_const_gphi(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline greturn *as_a_greturn(gimple stmt)
++{
++ return stmt;
++}
++
++static inline const greturn *as_a_const_greturn(const_gimple stmt)
++{
++ return stmt;
++}
+#endif
+
+#if BUILDING_GCC_VERSION == 4008
@@ -117089,34 +120567,35 @@ index 0000000..1d20e32
+#if BUILDING_GCC_VERSION <= 4009
+#define TODO_verify_il 0
+#define AVAIL_INTERPOSABLE AVAIL_OVERWRITABLE
-+#endif
+
-+#if BUILDING_GCC_VERSION == 4009
-+typedef struct gimple_statement_base gasm;
-+typedef struct gimple_statement_base gassign;
-+typedef struct gimple_statement_base gcall;
-+typedef struct gimple_statement_base gcond;
-+typedef struct gimple_statement_base gdebug;
-+typedef struct gimple_statement_base gphi;
-+typedef struct gimple_statement_base greturn;
-+#endif
++#define section_name_prefix LTO_SECTION_NAME_PREFIX
++#define fatal_error(loc, gmsgid, ...) fatal_error((gmsgid), __VA_ARGS__)
+
-+#if BUILDING_GCC_VERSION <= 4009
+typedef struct rtx_def rtx_insn;
+
+static inline void set_decl_section_name(tree node, const char *value)
+{
+ DECL_SECTION_NAME(node) = build_string(strlen(value) + 1, value);
+}
++#endif
++
++#if BUILDING_GCC_VERSION == 4009
++typedef struct gimple_statement_asm gasm;
++typedef struct gimple_statement_base gassign;
++typedef struct gimple_statement_call gcall;
++typedef struct gimple_statement_base gcond;
++typedef struct gimple_statement_base gdebug;
++typedef struct gimple_statement_phi gphi;
++typedef struct gimple_statement_base greturn;
+
+static inline gasm *as_a_gasm(gimple stmt)
+{
-+ return stmt;
++ return as_a<gasm>(stmt);
+}
+
+static inline const gasm *as_a_const_gasm(const_gimple stmt)
+{
-+ return stmt;
++ return as_a<const gasm>(stmt);
+}
+
+static inline gassign *as_a_gassign(gimple stmt)
@@ -117131,24 +120610,44 @@ index 0000000..1d20e32
+
+static inline gcall *as_a_gcall(gimple stmt)
+{
-+ return stmt;
++ return as_a<gcall>(stmt);
+}
+
+static inline const gcall *as_a_const_gcall(const_gimple stmt)
+{
++ return as_a<const gcall>(stmt);
++}
++
++static inline gcond *as_a_gcond(gimple stmt)
++{
+ return stmt;
+}
+
-+static inline gphi *as_a_gphi(gimple stmt)
++static inline const gcond *as_a_const_gcond(const_gimple stmt)
+{
+ return stmt;
+}
+
-+static inline const gphi *as_a_const_gphi(const_gimple stmt)
++static inline gdebug *as_a_gdebug(gimple stmt)
+{
+ return stmt;
+}
+
++static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
++{
++ return stmt;
++}
++
++static inline gphi *as_a_gphi(gimple stmt)
++{
++ return as_a<gphi>(stmt);
++}
++
++static inline const gphi *as_a_const_gphi(const_gimple stmt)
++{
++ return as_a<const gphi>(stmt);
++}
++
+static inline greturn *as_a_greturn(gimple stmt)
+{
+ return stmt;
@@ -117210,6 +120709,11 @@ index 0000000..1d20e32
+ varpool_node::add(decl);
+}
+
++static inline unsigned int rebuild_cgraph_edges(void)
++{
++ return cgraph_edge::rebuild_edges();
++}
++
+static inline cgraph_node_ptr cgraph_function_node(cgraph_node_ptr node, enum availability *availability)
+{
+ return node->function_symbol(availability);
@@ -118594,10 +122098,10 @@ index 0000000..ac6f9b4
+}
diff --git a/tools/gcc/randomize_layout_plugin.c b/tools/gcc/randomize_layout_plugin.c
new file mode 100644
-index 0000000..713be61
+index 0000000..40dcfa9
--- /dev/null
+++ b/tools/gcc/randomize_layout_plugin.c
-@@ -0,0 +1,918 @@
+@@ -0,0 +1,922 @@
+/*
+ * Copyright 2014,2015 by Open Source Security, Inc., Brad Spengler <spender@grsecurity.net>
+ * and PaX Team <pageexec@freemail.hu>
@@ -119443,7 +122947,11 @@ index 0000000..713be61
+class randomize_layout_bad_cast : public gimple_opt_pass {
+public:
+ randomize_layout_bad_cast() : gimple_opt_pass(randomize_layout_bad_cast_data, g) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return find_bad_casts(); }
++#else
+ unsigned int execute() { return find_bad_casts(); }
++#endif
+};
+}
+#endif
@@ -119660,15 +123168,16 @@ index 0000000..12b1e3b
+exit 0
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
new file mode 100644
-index 0000000..c43901f
+index 0000000..495983ff
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_asm.c
-@@ -0,0 +1,748 @@
+@@ -0,0 +1,762 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -119686,8 +123195,8 @@ index 0000000..c43901f
+#include "gcc-common.h"
+#include "size_overflow.h"
+
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
++static void search_size_overflow_attribute(gimple_set *visited, tree lhs);
++static enum mark search_intentional(gimple_set *visited, const_tree lhs);
+
+// data for the size_overflow asm stmt
+struct asm_data {
@@ -119721,7 +123230,7 @@ index 0000000..c43901f
+
+static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
+{
-+ gimple asm_stmt;
++ gasm *asm_stmt;
+ gimple_stmt_iterator gsi;
+#if BUILDING_GCC_VERSION <= 4007
+ VEC(tree, gc) *input, *output = NULL;
@@ -119734,7 +123243,7 @@ index 0000000..c43901f
+ if (asm_data->output)
+ output = create_asm_io_list(str_output, asm_data->output);
+
-+ asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
++ asm_stmt = as_a_gasm(gimple_build_asm_vec(str, input, output, NULL, NULL));
+ gsi = gsi_for_stmt(asm_data->def_stmt);
+ gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
+
@@ -119749,13 +123258,13 @@ index 0000000..c43901f
+ SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
+}
+
-+static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
++static enum mark search_intentional_phi(gimple_set *visited, const_tree result)
+{
+ enum mark cur_fndecl_attr;
-+ gimple phi = get_def_stmt(result);
++ gphi *phi = as_a_gphi(get_def_stmt(result));
+ unsigned int i, n = gimple_phi_num_args(phi);
+
-+ pointer_set_insert(visited, phi);
++ pointer_set_insert(visited, (gimple)phi);
+ for (i = 0; i < n; i++) {
+ tree arg = gimple_phi_arg_def(phi, i);
+
@@ -119766,11 +123275,11 @@ index 0000000..c43901f
+ return MARK_NO;
+}
+
-+static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
++static enum mark search_intentional_binary(gimple_set *visited, const_tree lhs)
+{
+ enum mark cur_fndecl_attr;
+ const_tree rhs1, rhs2;
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+
+ rhs1 = gimple_assign_rhs1(def_stmt);
+ rhs2 = gimple_assign_rhs2(def_stmt);
@@ -119782,7 +123291,7 @@ index 0000000..c43901f
+}
+
+// Look up the intentional_overflow attribute on the caller and the callee functions.
-+static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
++static enum mark search_intentional(gimple_set *visited, const_tree lhs)
+{
+ const_gimple def_stmt;
+
@@ -119800,7 +123309,7 @@ index 0000000..c43901f
+ case GIMPLE_NOP:
+ return search_intentional(visited, SSA_NAME_VAR(lhs));
+ case GIMPLE_ASM:
-+ if (is_size_overflow_intentional_asm_turn_off(def_stmt))
++ if (is_size_overflow_intentional_asm_turn_off(as_a_const_gasm(def_stmt)))
+ return MARK_TURN_OFF;
+ return MARK_NO;
+ case GIMPLE_CALL:
@@ -119810,7 +123319,7 @@ index 0000000..c43901f
+ case GIMPLE_ASSIGN:
+ switch (gimple_num_ops(def_stmt)) {
+ case 2:
-+ return search_intentional(visited, gimple_assign_rhs1(def_stmt));
++ return search_intentional(visited, gimple_assign_rhs1(as_a_const_gassign(def_stmt)));
+ case 3:
+ return search_intentional_binary(visited, lhs);
+ }
@@ -119827,7 +123336,7 @@ index 0000000..c43901f
+static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
+{
+ const_tree fndecl;
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+ enum mark cur_fndecl_attr, decl_attr = MARK_NO;
+
+ fndecl = get_interesting_orig_fndecl(stmt, argnum);
@@ -119870,7 +123379,7 @@ index 0000000..c43901f
+ is_missing_function(orig_fndecl, num);
+}
+
-+static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
++static void search_size_overflow_attribute_phi(gimple_set *visited, const_tree result)
+{
+ gimple phi = get_def_stmt(result);
+ unsigned int i, n = gimple_phi_num_args(phi);
@@ -119883,7 +123392,7 @@ index 0000000..c43901f
+ }
+}
+
-+static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
++static void search_size_overflow_attribute_binary(gimple_set *visited, const_tree lhs)
+{
+ const_gimple def_stmt = get_def_stmt(lhs);
+ tree rhs1, rhs2;
@@ -119895,7 +123404,7 @@ index 0000000..c43901f
+ search_size_overflow_attribute(visited, rhs2);
+}
+
-+static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
++static void search_size_overflow_attribute(gimple_set *visited, tree lhs)
+{
+ const_gimple def_stmt;
+
@@ -119945,18 +123454,20 @@ index 0000000..c43901f
+{
+ tree fndecl = NULL_TREE;
+ tree lhs;
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
+ return;
+
+ if (num == 0) {
+ gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
-+ lhs = gimple_return_retval(stmt);
++ lhs = gimple_return_retval(as_a_const_greturn(stmt));
+ } else {
-+ gcc_assert(is_gimple_call(stmt));
-+ lhs = gimple_call_arg(stmt, num - 1);
-+ fndecl = gimple_call_fndecl(stmt);
++ const gcall *call = as_a_const_gcall(stmt);
++
++ gcc_assert(is_gimple_call(call));
++ lhs = gimple_call_arg(call, num - 1);
++ fndecl = gimple_call_fndecl(call);
+ }
+
+ if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
@@ -119980,9 +123491,9 @@ index 0000000..c43901f
+ asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
+ asm_data->output = make_ssa_name(asm_data->output, stmt);
+ if (gimple_code(stmt) == GIMPLE_RETURN)
-+ gimple_return_set_retval(stmt, asm_data->output);
++ gimple_return_set_retval(as_a_greturn(stmt), asm_data->output);
+ else
-+ gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
++ gimple_call_set_arg(as_a_gcall(stmt), argnum - 1, asm_data->output);
+ update_stmt(stmt);
+}
+
@@ -120062,7 +123573,7 @@ index 0000000..c43901f
+ break;
+ }
+ case GIMPLE_ASM:
-+ if (is_size_overflow_asm(asm_data->def_stmt)) {
++ if (is_size_overflow_asm(as_a_const_gasm(asm_data->def_stmt))) {
+ asm_data->input = NULL_TREE;
+ break;
+ }
@@ -120093,7 +123604,7 @@ index 0000000..c43901f
+ search_missing_size_overflow_attribute_gimple(stmt, argnum);
+
+ asm_data.def_stmt = get_def_stmt(asm_data.output);
-+ if (is_size_overflow_intentional_asm_turn_off(asm_data.def_stmt))
++ if (gimple_code(asm_data.def_stmt) == GIMPLE_ASM && is_size_overflow_intentional_asm_turn_off(as_a_const_gasm(asm_data.def_stmt)))
+ return;
+
+ create_asm_input(stmt, argnum, &asm_data);
@@ -120143,7 +123654,7 @@ index 0000000..c43901f
+ return true;
+}
+
-+static void walk_use_def_ptr(struct pointer_set_t *visited, const_tree lhs)
++static void walk_use_def_ptr(gimple_set *visited, const_tree lhs)
+{
+ gimple def_stmt;
+
@@ -120160,28 +123671,33 @@ index 0000000..c43901f
+ case GIMPLE_CALL:
+ break;
+ case GIMPLE_PHI: {
-+ unsigned int i, n = gimple_phi_num_args(def_stmt);
++ gphi *phi = as_a_gphi(def_stmt);
++ unsigned int i, n = gimple_phi_num_args(phi);
+
+ pointer_set_insert(visited, def_stmt);
+
+ for (i = 0; i < n; i++) {
-+ tree arg = gimple_phi_arg_def(def_stmt, i);
++ tree arg = gimple_phi_arg_def(phi, i);
+
+ walk_use_def_ptr(visited, arg);
+ }
++ break;
+ }
-+ case GIMPLE_ASSIGN:
-+ switch (gimple_num_ops(def_stmt)) {
++ case GIMPLE_ASSIGN: {
++ gassign *assign = as_a_gassign(def_stmt);
++
++ switch (gimple_num_ops(assign)) {
+ case 2:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
++ walk_use_def_ptr(visited, gimple_assign_rhs1(assign));
+ return;
+ case 3:
-+ walk_use_def_ptr(visited, gimple_assign_rhs1(def_stmt));
-+ walk_use_def_ptr(visited, gimple_assign_rhs2(def_stmt));
++ walk_use_def_ptr(visited, gimple_assign_rhs1(assign));
++ walk_use_def_ptr(visited, gimple_assign_rhs2(assign));
+ return;
+ default:
+ return;
+ }
++ }
+ default:
+ debug_gimple_stmt((gimple)def_stmt);
+ error("%s: unknown gimple code", __func__);
@@ -120192,7 +123708,7 @@ index 0000000..c43901f
+// Look for a ptr - ptr expression (e.g., cpuset_common_file_read() s - page)
+static void insert_mark_not_intentional_asm_at_ptr(const_tree arg)
+{
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ visited = pointer_set_create();
+ walk_use_def_ptr(visited, arg);
@@ -120200,7 +123716,7 @@ index 0000000..c43901f
+}
+
+// Determine the return value and insert the asm stmt to mark the return stmt.
-+static void insert_asm_ret(gimple stmt)
++static void insert_asm_ret(greturn *stmt)
+{
+ tree ret;
+
@@ -120209,7 +123725,7 @@ index 0000000..c43901f
+}
+
+// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
-+static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
++static void insert_asm_arg(gcall *stmt, unsigned int orig_argnum)
+{
+ tree arg;
+ unsigned int argnum;
@@ -120286,7 +123802,7 @@ index 0000000..c43901f
+ * Look up the intentional_overflow attribute that turns off ipa based duplication
+ * on the callee function.
+ */
-+static bool is_mark_turn_off_attribute(gimple stmt)
++static bool is_mark_turn_off_attribute(gcall *stmt)
+{
+ enum mark mark;
+ const_tree fndecl = gimple_call_fndecl(stmt);
@@ -120298,7 +123814,7 @@ index 0000000..c43901f
+}
+
+// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
-+static void handle_interesting_function(gimple stmt)
++static void handle_interesting_function(gcall *stmt)
+{
+ unsigned int argnum;
+ tree fndecl;
@@ -120324,7 +123840,7 @@ index 0000000..c43901f
+}
+
+// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
-+static void handle_interesting_ret(gimple stmt)
++static void handle_interesting_ret(greturn *stmt)
+{
+ bool orig_argnums[MAX_PARAM + 1] = {false};
+
@@ -120345,13 +123861,13 @@ index 0000000..c43901f
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gimple stmt = gsi_stmt(gsi);
+
-+ if (is_size_overflow_asm(stmt))
++ if (gimple_code(stmt) == GIMPLE_ASM && is_size_overflow_asm(as_a_const_gasm(stmt)))
+ continue;
+
+ if (is_gimple_call(stmt))
-+ handle_interesting_function(stmt);
++ handle_interesting_function(as_a_gcall(stmt));
+ else if (gimple_code(stmt) == GIMPLE_RETURN)
-+ handle_interesting_ret(stmt);
++ handle_interesting_ret(as_a_greturn(stmt));
+ }
+ }
+ return 0;
@@ -120363,6 +123879,7 @@ index 0000000..c43901f
+ * that the ipa pass will detect and insert the size overflow checks for.
+ */
+#if BUILDING_GCC_VERSION >= 4009
++namespace {
+static const struct pass_data insert_size_overflow_asm_pass_data = {
+#else
+static struct gimple_opt_pass insert_size_overflow_asm_pass = {
@@ -120373,7 +123890,8 @@ index 0000000..c43901f
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
-+#if BUILDING_GCC_VERSION >= 4009
++#if BUILDING_GCC_VERSION >= 5000
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -120395,34 +123913,39 @@ index 0000000..c43901f
+};
+
+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
+class insert_size_overflow_asm_pass : public gimple_opt_pass {
+public:
+ insert_size_overflow_asm_pass() : gimple_opt_pass(insert_size_overflow_asm_pass_data, g) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return search_interesting_functions(); }
++#else
+ unsigned int execute() { return search_interesting_functions(); }
++#endif
+};
+}
-+#endif
+
-+struct opt_pass *make_insert_size_overflow_asm_pass(void)
++opt_pass *make_insert_size_overflow_asm_pass(void)
+{
-+#if BUILDING_GCC_VERSION >= 4009
+ return new insert_size_overflow_asm_pass();
++}
+#else
++struct opt_pass *make_insert_size_overflow_asm_pass(void)
++{
+ return &insert_size_overflow_asm_pass.pass;
-+#endif
+}
++#endif
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
new file mode 100644
-index 0000000..73f0a12
+index 0000000..0766e39
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_core.c
-@@ -0,0 +1,943 @@
+@@ -0,0 +1,931 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -120486,19 +124009,6 @@ index 0000000..73f0a12
+ return new_type;
+}
+
-+static tree get_lhs(const_gimple stmt)
-+{
-+ switch (gimple_code(stmt)) {
-+ case GIMPLE_ASSIGN:
-+ case GIMPLE_CALL:
-+ return gimple_get_lhs(stmt);
-+ case GIMPLE_PHI:
-+ return gimple_phi_result(stmt);
-+ default:
-+ return NULL_TREE;
-+ }
-+}
-+
+static tree cast_to_new_size_overflow_type(struct visited *visited, gimple stmt, tree rhs, tree size_overflow_type, bool before)
+{
+ gimple_stmt_iterator gsi;
@@ -120572,7 +124082,7 @@ index 0000000..73f0a12
+ return cast_to_new_size_overflow_type(visited, oldstmt, rhs1, dst_type, before);
+}
+
-+tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
++tree dup_assign(struct visited *visited, gassign *oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
+{
+ gimple stmt;
+ gimple_stmt_iterator gsi;
@@ -120641,13 +124151,14 @@ index 0000000..73f0a12
+ assign = build_cast_stmt(visited, size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
+
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
+static tree use_phi_ssa_name(struct visited *visited, tree ssa_name_var, tree new_arg)
+{
+ gimple_stmt_iterator gsi;
-+ gimple assign, def_stmt = get_def_stmt(new_arg);
++ gimple assign;
++ gimple def_stmt = get_def_stmt(new_arg);
+
+ if (gimple_code(def_stmt) == GIMPLE_PHI) {
+ gsi = gsi_after_labels(gimple_bb(def_stmt));
@@ -120658,7 +124169,7 @@ index 0000000..73f0a12
+ }
+
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
+static tree cast_visited_phi_arg(struct visited *visited, tree ssa_name_var, tree arg, tree size_overflow_type)
@@ -120675,13 +124186,12 @@ index 0000000..73f0a12
+
+ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
-+static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
++static tree create_new_phi_arg(struct visited *visited, tree ssa_name_var, tree new_arg, gphi *oldstmt, unsigned int i)
+{
-+ tree size_overflow_type;
-+ tree arg;
++ tree size_overflow_type, arg;
+ const_gimple def_stmt;
+
+ if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
@@ -120698,7 +124208,7 @@ index 0000000..73f0a12
+ case GIMPLE_NOP: {
+ basic_block bb;
+
-+ bb = gimple_phi_arg_edge(oldstmt, i)->src;
++ bb = gimple_phi_arg_edge(as_a_gphi(oldstmt), i)->src;
+ return cast_parm_decl(visited, ssa_name_var, arg, size_overflow_type, bb);
+ }
+ case GIMPLE_ASM: {
@@ -120708,7 +124218,7 @@ index 0000000..73f0a12
+ gsi = gsi_for_stmt(stmt);
+ assign = build_cast_stmt(visited, size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+ }
+ default:
+ gcc_assert(new_arg != NULL_TREE);
@@ -120717,10 +124227,10 @@ index 0000000..73f0a12
+ }
+}
+
-+static gimple overflow_create_phi_node(struct visited *visited, gimple oldstmt, tree result)
++static gphi *overflow_create_phi_node(struct visited *visited, gphi *oldstmt, tree result)
+{
+ basic_block bb;
-+ gimple phi;
++ gphi *phi;
+ gimple_seq seq;
+ gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
+
@@ -120733,7 +124243,7 @@ index 0000000..73f0a12
+ result = create_new_var(size_overflow_type);
+ }
+
-+ phi = create_phi_node(result, bb);
++ phi = as_a_gphi(create_phi_node(result, bb));
+ gimple_phi_set_result(phi, make_ssa_name(result, phi));
+ seq = phi_nodes(bb);
+ gsi = gsi_last(seq);
@@ -120746,12 +124256,12 @@ index 0000000..73f0a12
+}
+
+#if BUILDING_GCC_VERSION <= 4007
-+static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gimple oldstmt)
++static tree create_new_phi_node(struct visited *visited, VEC(tree, heap) **args, tree ssa_name_var, gphi *oldstmt)
+#else
-+static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gimple oldstmt)
++static tree create_new_phi_node(struct visited *visited, vec<tree, va_heap, vl_embed> *&args, tree ssa_name_var, gphi *oldstmt)
+#endif
+{
-+ gimple new_phi;
++ gphi *new_phi;
+ unsigned int i;
+ tree arg, result;
+ location_t loc = gimple_location(oldstmt);
@@ -120793,7 +124303,7 @@ index 0000000..73f0a12
+#else
+ vec<tree, va_heap, vl_embed> *args = NULL;
+#endif
-+ gimple oldstmt = get_def_stmt(orig_result);
++ gphi *oldstmt = as_a_gphi(get_def_stmt(orig_result));
+ unsigned int i, len = gimple_phi_num_args(oldstmt);
+
+ pointer_set_insert(visited->stmts, oldstmt);
@@ -120826,7 +124336,7 @@ index 0000000..73f0a12
+#endif
+}
+
-+static tree create_cast_assign(struct visited *visited, gimple stmt)
++static tree create_cast_assign(struct visited *visited, gassign *stmt)
+{
+ tree rhs1 = gimple_assign_rhs1(stmt);
+ tree lhs = gimple_assign_lhs(stmt);
@@ -120839,7 +124349,7 @@ index 0000000..73f0a12
+ return create_assign(visited, stmt, rhs1, AFTER_STMT);
+}
+
-+static bool skip_lhs_cast_check(const_gimple stmt)
++static bool skip_lhs_cast_check(const gassign *stmt)
+{
+ const_tree rhs = gimple_assign_rhs1(stmt);
+ const_gimple def_stmt = get_def_stmt(rhs);
@@ -120873,7 +124383,7 @@ index 0000000..73f0a12
+
+static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
+{
-+ gimple cond_stmt;
++ gcond *cond_stmt;
+ gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
+
+ cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
@@ -120883,7 +124393,7 @@ index 0000000..73f0a12
+
+static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
+{
-+ gimple func_stmt;
++ gcall *func_stmt;
+ const_gimple def_stmt;
+ const_tree loc_line;
+ tree loc_file, ssa_name, current_func;
@@ -120921,7 +124431,7 @@ index 0000000..73f0a12
+ ssa_name = create_string_param(ssa_name);
+
+ // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
-+ func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
++ func_stmt = as_a_gcall(gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name));
+ gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
+
+ callee_node = cgraph_get_create_node(report_size_overflow_decl);
@@ -121005,7 +124515,7 @@ index 0000000..73f0a12
+ insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
+}
+
-+static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
++static tree create_cast_overflow_check(struct visited *visited, struct cgraph_node *caller_node, tree new_rhs1, gassign *stmt)
+{
+ bool cast_lhs, cast_rhs;
+ tree lhs = gimple_assign_lhs(stmt);
@@ -121058,7 +124568,7 @@ index 0000000..73f0a12
+ return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
+}
+
-+static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++static tree handle_unary_rhs(struct visited *visited, struct cgraph_node *caller_node, gassign *stmt)
+{
+ enum tree_code rhs_code;
+ tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
@@ -121093,10 +124603,10 @@ index 0000000..73f0a12
+ return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
+}
+
-+static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gimple stmt)
++static tree handle_unary_ops(struct visited *visited, struct cgraph_node *caller_node, gassign *stmt)
+{
+ tree rhs1, lhs = gimple_assign_lhs(stmt);
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+
+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
+ rhs1 = gimple_assign_rhs1(def_stmt);
@@ -121155,7 +124665,7 @@ index 0000000..73f0a12
+}
+
+// Skip duplication when there is a minus expr and the type of rhs1 or rhs2 is a pointer_type.
-+static bool is_a_ptr_minus(gimple stmt)
++static bool is_a_ptr_minus(gassign *stmt)
+{
+ const_tree rhs1, rhs2, ptr1_rhs, ptr2_rhs;
+
@@ -121183,7 +124693,7 @@ index 0000000..73f0a12
+{
+ enum intentional_overflow_type res;
+ tree rhs1, rhs2, new_lhs;
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+ tree new_rhs1 = NULL_TREE;
+ tree new_rhs2 = NULL_TREE;
+
@@ -121224,13 +124734,13 @@ index 0000000..73f0a12
+ res = add_mul_intentional_overflow(def_stmt);
+ if (res != NO_INTENTIONAL_OVERFLOW) {
+ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+ insert_cast_expr(visited, get_def_stmt(new_lhs), res);
++ insert_cast_expr(visited, as_a_gassign(get_def_stmt(new_lhs)), res);
+ return new_lhs;
+ }
+
+ if (skip_expr_on_double_type(def_stmt)) {
+ new_lhs = dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
-+ insert_cast_expr(visited, get_def_stmt(new_lhs), NO_INTENTIONAL_OVERFLOW);
++ insert_cast_expr(visited, as_a_gassign(get_def_stmt(new_lhs)), NO_INTENTIONAL_OVERFLOW);
+ return new_lhs;
+ }
+
@@ -121267,7 +124777,7 @@ index 0000000..73f0a12
+static tree handle_ternary_ops(struct visited *visited, struct cgraph_node *caller_node, tree lhs)
+{
+ tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+
+ size_overflow_type = get_size_overflow_type(visited, def_stmt, lhs);
+
@@ -121346,7 +124856,7 @@ index 0000000..73f0a12
+ case GIMPLE_ASSIGN:
+ switch (gimple_num_ops(def_stmt)) {
+ case 2:
-+ return handle_unary_ops(visited, caller_node, def_stmt);
++ return handle_unary_ops(visited, caller_node, as_a_gassign(def_stmt));
+ case 3:
+ return handle_binary_ops(visited, caller_node, lhs);
+#if BUILDING_GCC_VERSION >= 4006
@@ -121363,16 +124873,17 @@ index 0000000..73f0a12
+
diff --git a/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
new file mode 100644
-index 0000000..df50164
+index 0000000..e1e6e19
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/insert_size_overflow_check_ipa.c
-@@ -0,0 +1,1141 @@
+@@ -0,0 +1,1157 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ * https://github.com/ephox-gcc-plugins
+ *
+ * Documentation:
+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
@@ -121396,8 +124907,8 @@ index 0000000..df50164
+
+unsigned int call_count;
+
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
++static void set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree lhs);
++static void walk_use_def(gimple_set *visited, struct interesting_node *cur_node, tree lhs);
+
+struct visited_fns {
+ struct visited_fns *next;
@@ -121567,9 +125078,9 @@ index 0000000..df50164
+ return cnodes;
+}
+
-+static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
++static void walk_phi_set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree result)
+{
-+ gimple phi = get_def_stmt(result);
++ gphi *phi = as_a_gphi(get_def_stmt(result));
+ unsigned int i, n = gimple_phi_num_args(phi);
+
+ pointer_set_insert(visited, phi);
@@ -121585,7 +125096,7 @@ index 0000000..df50164
+};
+
+// Search for constants, cast assignments and binary/ternary assignments
-+static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
++static void set_conditions(gimple_set *visited, bool *interesting_conditions, const_tree lhs)
+{
+ gimple def_stmt = get_def_stmt(lhs);
+
@@ -121602,7 +125113,7 @@ index 0000000..df50164
+
+ switch (gimple_code(def_stmt)) {
+ case GIMPLE_CALL:
-+ if (lhs == gimple_call_lhs(def_stmt))
++ if (lhs == gimple_call_lhs(as_a_const_gcall(def_stmt)))
+ interesting_conditions[RET] = true;
+ return;
+ case GIMPLE_NOP:
@@ -121611,11 +125122,13 @@ index 0000000..df50164
+ case GIMPLE_PHI:
+ interesting_conditions[PHI] = true;
+ return walk_phi_set_conditions(visited, interesting_conditions, lhs);
-+ case GIMPLE_ASSIGN:
-+ if (gimple_num_ops(def_stmt) == 2) {
-+ const_tree rhs = gimple_assign_rhs1(def_stmt);
++ case GIMPLE_ASSIGN: {
++ gassign *assign = as_a_gassign(def_stmt);
++
++ if (gimple_num_ops(assign) == 2) {
++ const_tree rhs = gimple_assign_rhs1(assign);
+
-+ if (gimple_assign_cast_p(def_stmt))
++ if (gimple_assign_cast_p(assign))
+ interesting_conditions[CAST] = true;
+
+ return set_conditions(visited, interesting_conditions, rhs);
@@ -121623,6 +125136,7 @@ index 0000000..df50164
+ interesting_conditions[NOT_UNARY] = true;
+ return;
+ }
++ }
+ default:
+ debug_gimple_stmt(def_stmt);
+ gcc_unreachable();
@@ -121632,7 +125146,7 @@ index 0000000..df50164
+// determine whether duplication will be necessary or not.
+static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
+{
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ if (gimple_assign_cast_p(cur_node->first_stmt))
+ interesting_conditions[CAST] = true;
@@ -121645,9 +125159,9 @@ index 0000000..df50164
+}
+
+// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
-+static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
++static void replace_size_overflow_asm_with_assign(gasm *asm_stmt, tree lhs, tree rhs)
+{
-+ gimple assign;
++ gassign *assign;
+ gimple_stmt_iterator gsi;
+
+ // already removed
@@ -121688,13 +125202,13 @@ index 0000000..df50164
+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+ return false;
+
-+ def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
++ def_stmt = get_def_stmt(gimple_assign_rhs1(as_a_gassign(def_stmt)));
+ return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
+}
+
-+static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
++static void walk_use_def_phi(gimple_set *visited, struct interesting_node *cur_node, tree result)
+{
-+ gimple phi = get_def_stmt(result);
++ gphi *phi = as_a_gphi(get_def_stmt(result));
+ unsigned int i, n = gimple_phi_num_args(phi);
+
+ pointer_set_insert(visited, phi);
@@ -121705,9 +125219,9 @@ index 0000000..df50164
+ }
+}
+
-+static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++static void walk_use_def_binary(gimple_set *visited, struct interesting_node *cur_node, tree lhs)
+{
-+ gimple def_stmt = get_def_stmt(lhs);
++ gassign *def_stmt = as_a_gassign(get_def_stmt(lhs));
+ tree rhs1, rhs2;
+
+ rhs1 = gimple_assign_rhs1(def_stmt);
@@ -121756,16 +125270,16 @@ index 0000000..df50164
+}
+
+// a size_overflow asm stmt in the control flow doesn't stop the recursion
-+static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
++static void handle_asm_stmt(gimple_set *visited, struct interesting_node *cur_node, tree lhs, const gasm *stmt)
+{
-+ if (!is_size_overflow_asm(stmt))
++ if (gimple_code(stmt) != GIMPLE_ASM || !is_size_overflow_asm(stmt))
+ walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+}
+
+/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
+ * and component refs (for checking the intentional_overflow attribute).
+ */
-+static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
++static void walk_use_def(gimple_set *visited, struct interesting_node *cur_node, tree lhs)
+{
+ const_gimple def_stmt;
+
@@ -121785,9 +125299,9 @@ index 0000000..df50164
+ case GIMPLE_NOP:
+ return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
+ case GIMPLE_ASM:
-+ return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
++ return handle_asm_stmt(visited, cur_node, lhs, as_a_const_gasm(def_stmt));
+ case GIMPLE_CALL: {
-+ tree fndecl = gimple_call_fndecl(def_stmt);
++ tree fndecl = gimple_call_fndecl(as_a_const_gcall(def_stmt));
+
+ if (fndecl == NULL_TREE)
+ return;
@@ -121799,7 +125313,7 @@ index 0000000..df50164
+ case GIMPLE_ASSIGN:
+ switch (gimple_num_ops(def_stmt)) {
+ case 2:
-+ return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
++ return walk_use_def(visited, cur_node, gimple_assign_rhs1(as_a_const_gassign(def_stmt)));
+ case 3:
+ return walk_use_def_binary(visited, cur_node, lhs);
+ }
@@ -121813,7 +125327,7 @@ index 0000000..df50164
+// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
+static void set_last_nodes(struct interesting_node *cur_node)
+{
-+ struct pointer_set_t *visited;
++ gimple_set *visited;
+
+ visited = pointer_set_create();
+ walk_use_def(visited, cur_node, cur_node->node);
@@ -121866,7 +125380,7 @@ index 0000000..df50164
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+
+ assign = build_cast_stmt(visited, orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
+static void change_orig_node(struct visited *visited, struct interesting_node *cur_node, tree new_node)
@@ -121877,10 +125391,10 @@ index 0000000..df50164
+
+ switch (gimple_code(stmt)) {
+ case GIMPLE_RETURN:
-+ gimple_return_set_retval(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ gimple_return_set_retval(as_a_greturn(stmt), cast_to_orig_type(visited, stmt, orig_node, new_node));
+ break;
+ case GIMPLE_CALL:
-+ gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ gimple_call_set_arg(as_a_gcall(stmt), cur_node->num - 1, cast_to_orig_type(visited, stmt, orig_node, new_node));
+ break;
+ case GIMPLE_ASSIGN:
+ switch (cur_node->num) {
@@ -121899,7 +125413,7 @@ index 0000000..df50164
+ gcc_unreachable();
+ }
+
-+ set_rhs(stmt, cast_to_orig_type(visited, stmt, orig_node, new_node));
++ set_rhs(as_a_gassign(stmt), cast_to_orig_type(visited, stmt, orig_node, new_node));
+ break;
+ default:
+ debug_gimple_stmt(stmt);
@@ -121986,7 +125500,7 @@ index 0000000..df50164
+ intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
+ intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
+ */
-+static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
++static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gasm *asm_stmt)
+{
+ struct interesting_node *new_node;
+ tree fndecl;
@@ -122006,7 +125520,7 @@ index 0000000..df50164
+ return head;
+
+ if (is_gimple_call(first_stmt))
-+ fndecl = gimple_call_fndecl(first_stmt);
++ fndecl = gimple_call_fndecl(as_a_const_gcall(first_stmt));
+ else
+ fndecl = current_function_decl;
+
@@ -122042,7 +125556,7 @@ index 0000000..df50164
+/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
+ * If the ret stmt is in the next cgraph node list then it's an interesting ret.
+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, greturn *stmt, struct next_cgraph_node *next_node)
+{
+ struct next_cgraph_node *cur_node;
+ tree ret = gimple_return_retval(stmt);
@@ -122063,7 +125577,7 @@ index 0000000..df50164
+/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
+ * If the call stmt is in the next cgraph node list then it's an interesting call.
+ */
-+static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
++static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gcall *stmt, struct next_cgraph_node *next_node)
+{
+ unsigned int argnum;
+ tree arg;
@@ -122099,7 +125613,7 @@ index 0000000..df50164
+}
+
+// Get the index of the rhs node in an assignment
-+static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
++static unsigned int get_assign_ops_count(const gassign *stmt, tree node)
+{
+ const_tree rhs1, rhs2;
+ unsigned int ret;
@@ -122127,7 +125641,7 @@ index 0000000..df50164
+}
+
+// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
-+static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
++static unsigned int find_arg_number_gimple(const_tree arg, const gcall *stmt)
+{
+ unsigned int i;
+
@@ -122150,7 +125664,7 @@ index 0000000..df50164
+/* starting from the size_overflow asm stmt collect interesting stmts. They can be
+ * any of return, call or assignment stmts (because of inlining).
+ */
-+static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
++static struct interesting_node *get_interesting_ret_or_call(tree_set *visited, struct interesting_node *head, tree node, gasm *intentional_asm)
+{
+ use_operand_p use_p;
+ imm_use_iterator imm_iter;
@@ -122171,28 +125685,31 @@ index 0000000..df50164
+
+ switch (gimple_code(stmt)) {
+ case GIMPLE_CALL:
-+ argnum = find_arg_number_gimple(node, stmt);
++ argnum = find_arg_number_gimple(node, as_a_gcall(stmt));
+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
+ break;
+ case GIMPLE_RETURN:
+ head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
+ break;
+ case GIMPLE_ASSIGN:
-+ argnum = get_assign_ops_count(stmt, node);
++ argnum = get_assign_ops_count(as_a_const_gassign(stmt), node);
+ head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
+ break;
+ case GIMPLE_PHI: {
-+ tree result = gimple_phi_result(stmt);
++ tree result = gimple_phi_result(as_a_gphi(stmt));
+ head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
+ break;
+ }
-+ case GIMPLE_ASM:
-+ if (gimple_asm_noutputs(stmt) != 0)
++ case GIMPLE_ASM: {
++ gasm *asm_stmt = as_a_gasm(stmt);
++
++ if (gimple_asm_noutputs(asm_stmt) != 0)
+ break;
-+ if (!is_size_overflow_asm(stmt))
++ if (!is_size_overflow_asm(asm_stmt))
+ break;
-+ head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
++ head = create_new_interesting_node(head, asm_stmt, node, 1, intentional_asm);
+ break;
++ }
+ case GIMPLE_COND:
+ case GIMPLE_SWITCH:
+ break;
@@ -122207,66 +125724,71 @@ index 0000000..df50164
+
+static void remove_size_overflow_asm(gimple stmt)
+{
++ gasm *asm_stmt;
+ gimple_stmt_iterator gsi;
+ tree input, output;
+
-+ if (!is_size_overflow_asm(stmt))
++ if (gimple_code(stmt) != GIMPLE_ASM)
+ return;
+
-+ if (gimple_asm_noutputs(stmt) == 0) {
-+ gsi = gsi_for_stmt(stmt);
-+ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), stmt);
++ asm_stmt = as_a_gasm(stmt);
++ if (!is_size_overflow_asm(asm_stmt))
++ return;
++
++ if (gimple_asm_noutputs(asm_stmt) == 0) {
++ gsi = gsi_for_stmt(asm_stmt);
++ ipa_remove_stmt_references(cgraph_get_create_node(current_function_decl), asm_stmt);
+ gsi_remove(&gsi, true);
+ return;
+ }
+
-+ input = gimple_asm_input_op(stmt, 0);
-+ output = gimple_asm_output_op(stmt, 0);
-+ replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
++ input = gimple_asm_input_op(asm_stmt, 0);
++ output = gimple_asm_output_op(asm_stmt, 0);
++ replace_size_overflow_asm_with_assign(asm_stmt, TREE_VALUE(output), TREE_VALUE(input));
+}
+
+/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
+ * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
+ * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
+ */
-+static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
++static struct interesting_node *handle_stmt_by_size_overflow_asm(gasm *asm_stmt, struct interesting_node *head)
+{
+ const_tree output;
-+ struct pointer_set_t *visited;
-+ gimple intentional_asm = NOT_INTENTIONAL_ASM;
++ tree_set *visited;
++ gasm *intentional_asm = NOT_INTENTIONAL_ASM;
+
-+ if (!is_size_overflow_asm(stmt))
++ if (!is_size_overflow_asm(asm_stmt))
+ return head;
+
-+ if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
-+ intentional_asm = stmt;
++ if (is_size_overflow_intentional_asm_yes(asm_stmt) || is_size_overflow_intentional_asm_turn_off(asm_stmt))
++ intentional_asm = asm_stmt;
+
-+ gcc_assert(gimple_asm_ninputs(stmt) == 1);
++ gcc_assert(gimple_asm_ninputs(asm_stmt) == 1);
+
-+ if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
++ if (gimple_asm_noutputs(asm_stmt) == 0 && is_size_overflow_intentional_asm_turn_off(asm_stmt))
+ return head;
+
-+ if (gimple_asm_noutputs(stmt) == 0) {
++ if (gimple_asm_noutputs(asm_stmt) == 0) {
+ const_tree input;
+
-+ if (!is_size_overflow_intentional_asm_turn_off(stmt))
++ if (!is_size_overflow_intentional_asm_turn_off(asm_stmt))
+ return head;
+
-+ input = gimple_asm_input_op(stmt, 0);
-+ remove_size_overflow_asm(stmt);
++ input = gimple_asm_input_op(asm_stmt, 0);
++ remove_size_overflow_asm(asm_stmt);
+ if (is_gimple_constant(TREE_VALUE(input)))
+ return head;
-+ visited = pointer_set_create();
++ visited = tree_pointer_set_create();
+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
+ pointer_set_destroy(visited);
+ return head;
+ }
+
-+ if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
-+ remove_size_overflow_asm(stmt);
++ if (!is_size_overflow_intentional_asm_yes(asm_stmt) && !is_size_overflow_intentional_asm_turn_off(asm_stmt))
++ remove_size_overflow_asm(asm_stmt);
+
-+ visited = pointer_set_create();
-+ output = gimple_asm_output_op(stmt, 0);
++ visited = tree_pointer_set_create();
++ output = gimple_asm_output_op(asm_stmt, 0);
+ head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
+ pointer_set_destroy(visited);
+ return head;
@@ -122290,14 +125812,14 @@ index 0000000..df50164
+ code = gimple_code(stmt);
+
+ if (code == GIMPLE_ASM)
-+ head = handle_stmt_by_size_overflow_asm(stmt, head);
++ head = handle_stmt_by_size_overflow_asm(as_a_gasm(stmt), head);
+
+ if (!next_node)
+ continue;
+ if (code == GIMPLE_CALL)
-+ head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
++ head = handle_stmt_by_cgraph_nodes_call(head, as_a_gcall(stmt), next_node);
+ if (code == GIMPLE_RETURN)
-+ head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
++ head = handle_stmt_by_cgraph_nodes_ret(head, as_a_greturn(stmt), next_node);
+ }
+ }
+ return head;
@@ -122434,7 +125956,6 @@ index 0000000..df50164
+ struct visited_fns *visited_fns = NULL;
+
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
-+ gcc_assert(cgraph_function_flags_ready);
+#if BUILDING_GCC_VERSION <= 4007
+ gcc_assert(node->reachable);
+#endif
@@ -122447,6 +125968,7 @@ index 0000000..df50164
+}
+
+#if BUILDING_GCC_VERSION >= 4009
++namespace {
+static const struct pass_data insert_size_overflow_check_data = {
+#else
+static struct ipa_opt_pass_d insert_size_overflow_check = {
@@ -122457,7 +125979,8 @@ index 0000000..df50164
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
-+#if BUILDING_GCC_VERSION >= 4009
++#if BUILDING_GCC_VERSION >= 5000
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -122490,36 +126013,40 @@ index 0000000..df50164
+};
+
+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
+class insert_size_overflow_check : public ipa_opt_pass_d {
+public:
+ insert_size_overflow_check() : ipa_opt_pass_d(insert_size_overflow_check_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return search_function(); }
++#else
+ unsigned int execute() { return search_function(); }
++#endif
+};
+}
-+#endif
+
-+struct opt_pass *make_insert_size_overflow_check(void)
++opt_pass *make_insert_size_overflow_check(void)
+{
-+#if BUILDING_GCC_VERSION >= 4009
+ return new insert_size_overflow_check();
++}
+#else
++struct opt_pass *make_insert_size_overflow_check(void)
++{
+ return &insert_size_overflow_check.pass;
-+#endif
+}
-+
++#endif
diff --git a/tools/gcc/size_overflow_plugin/intentional_overflow.c b/tools/gcc/size_overflow_plugin/intentional_overflow.c
new file mode 100644
-index 0000000..d71d72a
+index 0000000..eb62680
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/intentional_overflow.c
-@@ -0,0 +1,736 @@
+@@ -0,0 +1,748 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
++ * https://github.com/ephox-gcc-plugins
+ *
+ * Documentation:
+ * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
@@ -122564,7 +126091,7 @@ index 0000000..d71d72a
+ if (param_head == NULL_TREE)
+ return false;
+
-+ if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
++ if (tree_to_shwi(TREE_VALUE(param_head)) == -1)
+ return true;
+ return false;
+}
@@ -122756,13 +126283,15 @@ index 0000000..d71d72a
+{
+ const_tree rhs1, lhs, rhs1_type, lhs_type;
+ enum machine_mode lhs_mode, rhs_mode;
++ const gassign *assign;
+ gimple def_stmt = get_def_stmt(no_const_rhs);
+
+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ lhs = gimple_assign_lhs(def_stmt);
++ assign = as_a_const_gassign(def_stmt);
++ rhs1 = gimple_assign_rhs1(assign);
++ lhs = gimple_assign_lhs(assign);
+ rhs1_type = TREE_TYPE(rhs1);
+ lhs_type = TREE_TYPE(lhs);
+ rhs_mode = TYPE_MODE(rhs1_type);
@@ -122786,7 +126315,7 @@ index 0000000..d71d72a
+ return num;
+ if (is_gimple_debug(use_stmt))
+ continue;
-+ if (gimple_assign_cast_p(use_stmt) && is_size_overflow_type(gimple_assign_lhs(use_stmt)))
++ if (gimple_assign_cast_p(use_stmt) && is_size_overflow_type(gimple_assign_lhs(as_a_const_gassign(use_stmt))))
+ continue;
+ num++;
+ }
@@ -122802,12 +126331,14 @@ index 0000000..d71d72a
+bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
+{
+ tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
++ gassign *assign;
+ gimple def_stmt = get_def_stmt(lhs);
+
+ if (!def_stmt || !gimple_assign_cast_p(def_stmt))
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
++ assign = as_a_gassign(def_stmt);
++ rhs1 = gimple_assign_rhs1(assign);
+ rhs_type = TREE_TYPE(rhs1);
+ lhs_type = TREE_TYPE(lhs);
+ if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
@@ -122819,11 +126350,12 @@ index 0000000..d71d72a
+ if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
+ return false;
+
-+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
++ assign = as_a_gassign(def_stmt);
++ if (gimple_assign_rhs_code(assign) != PLUS_EXPR)
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
-+ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs1 = gimple_assign_rhs1(assign);
++ rhs2 = gimple_assign_rhs2(assign);
+ if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
+ return false;
+
@@ -122880,7 +126412,7 @@ index 0000000..d71d72a
+ return false;
+}
+
-+bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
++bool is_a_constant_overflow(const gassign *stmt, const_tree rhs)
+{
+ if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
+ return false;
@@ -122894,7 +126426,7 @@ index 0000000..d71d72a
+ return true;
+}
+
-+static tree change_assign_rhs(struct visited *visited, gimple stmt, const_tree orig_rhs, tree new_rhs)
++static tree change_assign_rhs(struct visited *visited, gassign *stmt, const_tree orig_rhs, tree new_rhs)
+{
+ gimple assign;
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
@@ -122904,10 +126436,10 @@ index 0000000..d71d72a
+
+ assign = build_cast_stmt(visited, origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
+ pointer_set_insert(visited->my_stmts, assign);
-+ return gimple_assign_lhs(assign);
++ return get_lhs(assign);
+}
+
-+tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
++tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gassign *stmt, tree change_rhs, tree new_rhs2)
+{
+ tree new_rhs, orig_rhs;
+ void (*gimple_assign_set_rhs)(gimple, tree);
@@ -122938,9 +126470,10 @@ index 0000000..d71d72a
+ return create_assign(visited, stmt, lhs, AFTER_STMT);
+}
+
-+static bool is_subtraction_special(struct visited *visited, const_gimple stmt)
++static bool is_subtraction_special(struct visited *visited, const gassign *stmt)
+{
-+ gimple rhs1_def_stmt, rhs2_def_stmt;
++ gimple def_stmt_1, def_stmt_2;
++ const gassign *rhs1_def_stmt, *rhs2_def_stmt;
+ const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
+ enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
+ const_tree rhs1 = gimple_assign_rhs1(stmt);
@@ -122954,15 +126487,18 @@ index 0000000..d71d72a
+ if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
+ return false;
+
-+ rhs1_def_stmt = get_def_stmt(rhs1);
-+ rhs2_def_stmt = get_def_stmt(rhs2);
-+ if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
++ def_stmt_1 = get_def_stmt(rhs1);
++ def_stmt_2 = get_def_stmt(rhs2);
++ if (!gimple_assign_cast_p(def_stmt_1) || !gimple_assign_cast_p(def_stmt_2))
+ return false;
+
++ rhs1_def_stmt = as_a_const_gassign(def_stmt_1);
++ rhs2_def_stmt = as_a_const_gassign(def_stmt_2);
+ rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
+ rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
+ rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
+ rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
++
+ rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
+ rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
+ rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
@@ -122977,15 +126513,15 @@ index 0000000..d71d72a
+ return true;
+}
+
-+static gimple create_binary_assign(struct visited *visited, enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
++static gassign *create_binary_assign(struct visited *visited, enum tree_code code, gassign *stmt, tree rhs1, tree rhs2)
+{
-+ gimple assign;
++ gassign *assign;
+ gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
+ tree type = TREE_TYPE(rhs1);
+ tree lhs = create_new_var(type);
+
+ gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
-+ assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
++ assign = as_a_gassign(gimple_build_assign_with_ops(code, lhs, rhs1, rhs2));
+ gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
+
+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
@@ -123005,11 +126541,11 @@ index 0000000..d71d72a
+
+ gsi = gsi_for_stmt(stmt);
+ cast_stmt = build_cast_stmt(visited, intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ pointer_set_insert(visited->my_stmts, cast_stmt);
-+ return gimple_assign_lhs(cast_stmt);
++ pointer_set_insert(visited->my_stmts, (gimple)cast_stmt);
++ return get_lhs(cast_stmt);
+}
+
-+static tree get_def_stmt_rhs(struct visited *visited, const_tree var)
++static tree get_def_stmt_rhs(const_tree var)
+{
+ tree rhs1, def_stmt_rhs1;
+ gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
@@ -123017,14 +126553,13 @@ index 0000000..d71d72a
+ def_stmt = get_def_stmt(var);
+ if (!gimple_assign_cast_p(def_stmt))
+ return NULL_TREE;
-+ gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && pointer_set_contains(visited->my_stmts, def_stmt) && gimple_assign_cast_p(def_stmt));
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1 = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
+ rhs1_def_stmt = get_def_stmt(rhs1);
+ if (!gimple_assign_cast_p(rhs1_def_stmt))
+ return rhs1;
+
-+ def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
++ def_stmt_rhs1 = gimple_assign_rhs1(as_a_const_gassign(rhs1_def_stmt));
+ def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
+
+ switch (gimple_code(def_stmt_rhs1_def_stmt)) {
@@ -123045,7 +126580,7 @@ index 0000000..d71d72a
+{
+ tree new_rhs1, new_rhs2;
+ tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
-+ gimple assign, stmt = get_def_stmt(lhs);
++ gassign *assign, *stmt = as_a_gassign(get_def_stmt(lhs));
+ tree rhs1 = gimple_assign_rhs1(stmt);
+ tree rhs2 = gimple_assign_rhs2(stmt);
+
@@ -123055,8 +126590,8 @@ index 0000000..d71d72a
+ new_rhs1 = expand(visited, caller_node, rhs1);
+ new_rhs2 = expand(visited, caller_node, rhs2);
+
-+ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs1);
-+ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(visited, new_rhs2);
++ new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
++ new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
+
+ if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
+ return NULL_TREE;
@@ -123099,6 +126634,7 @@ index 0000000..d71d72a
+ const_tree res;
+ tree rhs1, rhs2, def_rhs1, def_rhs2, const_rhs, def_const_rhs;
+ const_gimple def_stmt;
++ const gassign *assign, *def_assign;
+
+ if (!stmt || gimple_code(stmt) == GIMPLE_NOP)
+ return false;
@@ -123107,8 +126643,9 @@ index 0000000..d71d72a
+ if (gimple_assign_rhs_code(stmt) != MULT_EXPR)
+ return false;
+
-+ rhs1 = gimple_assign_rhs1(stmt);
-+ rhs2 = gimple_assign_rhs2(stmt);
++ assign = as_a_const_gassign(stmt);
++ rhs1 = gimple_assign_rhs1(assign);
++ rhs2 = gimple_assign_rhs2(assign);
+ if (is_gimple_constant(rhs1)) {
+ const_rhs = rhs1;
+ def_stmt = get_def_stmt(rhs2);
@@ -123124,8 +126661,9 @@ index 0000000..d71d72a
+ if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR && gimple_assign_rhs_code(def_stmt) != MINUS_EXPR)
+ return false;
+
-+ def_rhs1 = gimple_assign_rhs1(def_stmt);
-+ def_rhs2 = gimple_assign_rhs2(def_stmt);
++ def_assign = as_a_const_gassign(def_stmt);
++ def_rhs1 = gimple_assign_rhs1(def_assign);
++ def_rhs2 = gimple_assign_rhs2(def_assign);
+ if (is_gimple_constant(def_rhs1))
+ def_const_rhs = def_rhs1;
+ else if (is_gimple_constant(def_rhs2))
@@ -123133,13 +126671,13 @@ index 0000000..d71d72a
+ else
+ return false;
+
-+ res = fold_binary_loc(gimple_location(def_stmt), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs);
++ res = fold_binary_loc(gimple_location(def_assign), MULT_EXPR, TREE_TYPE(const_rhs), const_rhs, def_const_rhs);
+ if (is_lt_signed_type_max(res) && is_gt_zero(res))
+ return false;
+ return true;
+}
+
-+enum intentional_overflow_type add_mul_intentional_overflow(const_gimple stmt)
++enum intentional_overflow_type add_mul_intentional_overflow(const gassign *stmt)
+{
+ const_gimple def_stmt_1, def_stmt_2;
+ const_tree rhs1, rhs2;
@@ -123205,17 +126743,17 @@ index 0000000..d71d72a
+
+ if (!is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 2)
+ return false;
-+ rhs = gimple_assign_rhs1(def_stmt);
++ rhs = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
+ def_stmt = get_def_stmt(rhs);
+ if (!def_stmt)
+ return false;
+ return is_call_or_cast(def_stmt);
+}
+
-+void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt)
++void unsigned_signed_cast_intentional_overflow(struct visited *visited, gassign *stmt)
+{
+ unsigned int use_num;
-+ gimple so_stmt;
++ gassign *so_stmt;
+ const_gimple def_stmt;
+ const_tree rhs1, rhs2;
+ tree rhs = gimple_assign_rhs1(stmt);
@@ -123236,31 +126774,32 @@ index 0000000..d71d72a
+ if (!is_gimple_assign(def_stmt))
+ return;
+
-+ rhs1 = gimple_assign_rhs1(def_stmt);
++ rhs1 = gimple_assign_rhs1(as_a_const_gassign(def_stmt));
+ if (!is_unsigned_cast_or_call_def_stmt(rhs1))
+ return;
+
-+ rhs2 = gimple_assign_rhs2(def_stmt);
++ rhs2 = gimple_assign_rhs2(as_a_const_gassign(def_stmt));
+ if (!is_unsigned_cast_or_call_def_stmt(rhs2))
+ return;
+ if (gimple_num_ops(def_stmt) == 3 && !is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
+ return;
+
-+ so_stmt = get_dup_stmt(visited, stmt);
++ so_stmt = as_a_gassign(get_dup_stmt(visited, stmt));
+ create_up_and_down_cast(visited, so_stmt, lhs_type, gimple_assign_rhs1(so_stmt));
+}
+
diff --git a/tools/gcc/size_overflow_plugin/misc.c b/tools/gcc/size_overflow_plugin/misc.c
new file mode 100644
-index 0000000..4bddad2
+index 0000000..253b4a8b
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/misc.c
-@@ -0,0 +1,203 @@
+@@ -0,0 +1,219 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -123294,6 +126833,20 @@ index 0000000..4bddad2
+ current_function_decl = NULL_TREE;
+}
+
++tree get_lhs(const_gimple stmt)
++{
++ switch (gimple_code(stmt)) {
++ case GIMPLE_ASSIGN:
++ case GIMPLE_CALL:
++ return gimple_get_lhs(as_a_const_gassign(stmt));
++ case GIMPLE_PHI:
++ return gimple_phi_result(as_a_const_gphi(stmt));
++ default:
++ debug_gimple_stmt((gimple)stmt);
++ gcc_unreachable();
++ }
++}
++
+static bool is_bool(const_tree node)
+{
+ const_tree type;
@@ -123405,7 +126958,8 @@ index 0000000..4bddad2
+
+gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
+{
-+ gimple assign, def_stmt;
++ gimple def_stmt;
++ gassign *assign;
+
+ gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
+ gcc_assert(!is_gimple_constant(rhs));
@@ -123461,15 +127015,16 @@ index 0000000..4bddad2
+
diff --git a/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
new file mode 100644
-index 0000000..7c9e6d1
+index 0000000..de5999d
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/remove_unnecessary_dup.c
-@@ -0,0 +1,138 @@
+@@ -0,0 +1,139 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -123487,7 +127042,7 @@ index 0000000..7c9e6d1
+#include "gcc-common.h"
+#include "size_overflow.h"
+
-+bool skip_expr_on_double_type(const_gimple stmt)
++bool skip_expr_on_double_type(const gassign *stmt)
+{
+ enum tree_code code = gimple_assign_rhs_code(stmt);
+
@@ -123509,19 +127064,19 @@ index 0000000..7c9e6d1
+ }
+}
+
-+void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs)
++void create_up_and_down_cast(struct visited *visited, gassign *use_stmt, tree orig_type, tree rhs)
+{
+ const_tree orig_rhs1;
+ tree down_lhs, new_lhs, dup_type = TREE_TYPE(rhs);
-+ gimple down_cast, up_cast;
++ const_gimple down_cast, up_cast;
+ gimple_stmt_iterator gsi = gsi_for_stmt(use_stmt);
+
+ down_cast = build_cast_stmt(visited, orig_type, rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ down_lhs = gimple_assign_lhs(down_cast);
++ down_lhs = get_lhs(down_cast);
+
+ gsi = gsi_for_stmt(use_stmt);
+ up_cast = build_cast_stmt(visited, dup_type, down_lhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
-+ new_lhs = gimple_assign_lhs(up_cast);
++ new_lhs = get_lhs(up_cast);
+
+ orig_rhs1 = gimple_assign_rhs1(use_stmt);
+ if (operand_equal_p(orig_rhs1, rhs, 0))
@@ -123565,7 +127120,7 @@ index 0000000..7c9e6d1
+ return new_type;
+}
+
-+static void insert_cast_rhs(struct visited *visited, gimple stmt, tree rhs)
++static void insert_cast_rhs(struct visited *visited, gassign *stmt, tree rhs)
+{
+ tree type;
+
@@ -123580,7 +127135,7 @@ index 0000000..7c9e6d1
+ create_up_and_down_cast(visited, stmt, type, rhs);
+}
+
-+static void insert_cast(struct visited *visited, gimple stmt, tree rhs)
++static void insert_cast(struct visited *visited, gassign *stmt, tree rhs)
+{
+ if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && !is_size_overflow_type(rhs))
+ return;
@@ -123588,7 +127143,7 @@ index 0000000..7c9e6d1
+ insert_cast_rhs(visited, stmt, rhs);
+}
+
-+void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type)
++void insert_cast_expr(struct visited *visited, gassign *stmt, enum intentional_overflow_type type)
+{
+ tree rhs1, rhs2;
+
@@ -123605,10 +127160,10 @@ index 0000000..7c9e6d1
+
diff --git a/tools/gcc/size_overflow_plugin/size_overflow.h b/tools/gcc/size_overflow_plugin/size_overflow.h
new file mode 100644
-index 0000000..37f8fc3
+index 0000000..20732b1
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow.h
-@@ -0,0 +1,127 @@
+@@ -0,0 +1,183 @@
+#ifndef SIZE_OVERFLOW_H
+#define SIZE_OVERFLOW_H
+
@@ -123630,11 +127185,66 @@ index 0000000..37f8fc3
+ NO_INTENTIONAL_OVERFLOW, RHS1_INTENTIONAL_OVERFLOW, RHS2_INTENTIONAL_OVERFLOW
+};
+
++
++#if BUILDING_GCC_VERSION >= 5000
++typedef struct hash_set<const_gimple> gimple_set;
++
++static inline bool pointer_set_insert(gimple_set *visited, const_gimple stmt)
++{
++ return visited->add(stmt);
++}
++
++static inline bool pointer_set_contains(gimple_set *visited, const_gimple stmt)
++{
++ return visited->contains(stmt);
++}
++
++static inline gimple_set* pointer_set_create(void)
++{
++ return new hash_set<const_gimple>;
++}
++
++static inline void pointer_set_destroy(gimple_set *visited)
++{
++ delete visited;
++}
++
++typedef struct hash_set<tree> tree_set;
++
++static inline bool pointer_set_insert(tree_set *visited, tree node)
++{
++ return visited->add(node);
++}
++
++static inline bool pointer_set_contains(tree_set *visited, tree node)
++{
++ return visited->contains(node);
++}
++
++static inline tree_set *tree_pointer_set_create(void)
++{
++ return new hash_set<tree>;
++}
++
++static inline void pointer_set_destroy(tree_set *visited)
++{
++ delete visited;
++}
++#else
++typedef struct pointer_set_t gimple_set;
++typedef struct pointer_set_t tree_set;
++
++static inline tree_set *tree_pointer_set_create(void)
++{
++ return pointer_set_create();
++}
++#endif
++
+struct visited {
-+ struct pointer_set_t *stmts;
-+ struct pointer_set_t *my_stmts;
-+ struct pointer_set_t *skip_expr_casts;
-+ struct pointer_set_t *no_cast_check;
++ gimple_set *stmts;
++ gimple_set *my_stmts;
++ gimple_set *skip_expr_casts;
++ gimple_set *no_cast_check;
+};
+
+// size_overflow_plugin.c
@@ -123665,10 +127275,10 @@ index 0000000..37f8fc3
+ unsigned int num;
+ enum mark intentional_attr_decl;
+ enum mark intentional_attr_cur_fndecl;
-+ gimple intentional_mark_from_gimple;
++ gasm *intentional_mark_from_gimple;
+};
+
-+extern bool is_size_overflow_asm(const_gimple stmt);
++extern bool is_size_overflow_asm(const gasm *stmt);
+extern unsigned int get_function_num(const_tree node, const_tree orig_fndecl);
+extern unsigned int get_correct_arg_count(unsigned int argnum, const_tree fndecl);
+extern bool is_missing_function(const_tree orig_fndecl, unsigned int num);
@@ -123683,8 +127293,8 @@ index 0000000..37f8fc3
+
+// intentional_overflow.c
+extern enum mark get_intentional_attr_type(const_tree node);
-+extern bool is_size_overflow_intentional_asm_yes(const_gimple stmt);
-+extern bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt);
++extern bool is_size_overflow_intentional_asm_yes(const gasm *stmt);
++extern bool is_size_overflow_intentional_asm_turn_off(const gasm *stmt);
+extern bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum);
+extern bool is_yes_intentional_attr(const_tree decl, unsigned int argnum);
+extern bool is_turn_off_intentional_attr(const_tree decl);
@@ -123692,12 +127302,12 @@ index 0000000..37f8fc3
+extern void check_intentional_attribute_ipa(struct interesting_node *cur_node);
+extern bool is_a_cast_and_const_overflow(const_tree no_const_rhs);
+extern bool is_const_plus_unsigned_signed_truncation(const_tree lhs);
-+extern bool is_a_constant_overflow(const_gimple stmt, const_tree rhs);
-+extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2);
++extern bool is_a_constant_overflow(const gassign *stmt, const_tree rhs);
++extern tree handle_intentional_overflow(struct visited *visited, struct cgraph_node *caller_node, bool check_overflow, gassign *stmt, tree change_rhs, tree new_rhs2);
+extern tree handle_integer_truncation(struct visited *visited, struct cgraph_node *caller_node, const_tree lhs);
+extern bool is_a_neg_overflow(const_gimple stmt, const_tree rhs);
-+extern enum intentional_overflow_type add_mul_intentional_overflow(const_gimple def_stmt);
-+extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gimple stmt);
++extern enum intentional_overflow_type add_mul_intentional_overflow(const gassign *def_stmt);
++extern void unsigned_signed_cast_intentional_overflow(struct visited *visited, gassign *stmt);
+
+
+// insert_size_overflow_check_ipa.c
@@ -123714,6 +127324,7 @@ index 0000000..37f8fc3
+// misc.c
+extern void set_current_function_decl(tree fndecl);
+extern void unset_current_function_decl(void);
++extern tree get_lhs(const_gimple stmt);
+extern gimple get_def_stmt(const_tree node);
+extern tree create_new_var(tree type);
+extern gimple build_cast_stmt(struct visited *visited, tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force);
@@ -123725,28 +127336,29 @@ index 0000000..37f8fc3
+// insert_size_overflow_check_core.c
+extern tree expand(struct visited *visited, struct cgraph_node *caller_node, tree lhs);
+extern void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
-+extern tree dup_assign(struct visited *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
++extern tree dup_assign(struct visited *visited, gassign *oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
+extern tree create_assign(struct visited *visited, gimple oldstmt, tree rhs1, bool before);
+
+
+// remove_unnecessary_dup.c
+extern struct opt_pass *make_remove_unnecessary_dup_pass(void);
-+extern void insert_cast_expr(struct visited *visited, gimple stmt, enum intentional_overflow_type type);
-+extern bool skip_expr_on_double_type(const_gimple stmt);
-+extern void create_up_and_down_cast(struct visited *visited, gimple use_stmt, tree orig_type, tree rhs);
++extern void insert_cast_expr(struct visited *visited, gassign *stmt, enum intentional_overflow_type type);
++extern bool skip_expr_on_double_type(const gassign *stmt);
++extern void create_up_and_down_cast(struct visited *visited, gassign *use_stmt, tree orig_type, tree rhs);
+
+#endif
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_debug.c b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
new file mode 100644
-index 0000000..4378111
+index 0000000..176c32f
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_debug.c
-@@ -0,0 +1,116 @@
+@@ -0,0 +1,123 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -123763,7 +127375,7 @@ index 0000000..4378111
+
+#include "gcc-common.h"
+
-+static unsigned int dump_functions(void)
++static unsigned int __unused dump_functions(void)
+{
+ struct cgraph_node *node;
+
@@ -123798,6 +127410,7 @@ index 0000000..4378111
+}
+
+#if BUILDING_GCC_VERSION >= 4009
++namespace {
+static const struct pass_data dump_pass_data = {
+#else
+static struct ipa_opt_pass_d dump_pass = {
@@ -123808,7 +127421,8 @@ index 0000000..4378111
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
-+#if BUILDING_GCC_VERSION >= 4009
++#if BUILDING_GCC_VERSION >= 5000
++#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = false,
+ .has_execute = true,
+#else
@@ -123841,23 +127455,27 @@ index 0000000..4378111
+};
+
+#if BUILDING_GCC_VERSION >= 4009
-+namespace {
+class dump_pass : public ipa_opt_pass_d {
+public:
+ dump_pass() : ipa_opt_pass_d(dump_pass_data, g, NULL, NULL, NULL, NULL, NULL, NULL, 0, NULL, NULL) {}
++#if BUILDING_GCC_VERSION >= 5000
++ virtual unsigned int execute(function *) { return dump_functions(); }
++#else
+ unsigned int execute() { return dump_functions(); }
++#endif
+};
+}
-+#endif
+
-+struct opt_pass *make_dump_pass(void)
++opt_pass *make_dump_pass(void)
+{
-+#if BUILDING_GCC_VERSION >= 4009
+ return new dump_pass();
++}
+#else
++struct opt_pass *make_dump_pass(void)
++{
+ return &dump_pass.pass;
-+#endif
+}
++#endif
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
index 0000000..cd3c18f
@@ -129101,15 +132719,16 @@ index 0000000..4ad4525
+zpios_read_64734 zpios_read 3 64734 NULL
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
new file mode 100644
-index 0000000..95f7abd
+index 0000000..7e07890
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin.c
-@@ -0,0 +1,259 @@
+@@ -0,0 +1,260 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -129137,7 +132756,7 @@ index 0000000..95f7abd
+tree size_overflow_type_TI;
+
+static struct plugin_info size_overflow_plugin_info = {
-+ .version = "20140725",
++ .version = "20140725_01",
+ .help = "no-size-overflow\tturn off size overflow checking\n",
+};
+
@@ -129191,7 +132810,7 @@ index 0000000..95f7abd
+ return NULL_TREE;
+ }
+
-+ if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
++ if (tree_to_shwi(TREE_VALUE(args)) != 0)
+ return NULL_TREE;
+
+ for (; args; args = TREE_CHAIN(args)) {
@@ -129366,15 +132985,16 @@ index 0000000..95f7abd
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
new file mode 100644
-index 0000000..0888f6c
+index 0000000..2a693fe
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_plugin_hash.c
-@@ -0,0 +1,364 @@
+@@ -0,0 +1,355 @@
+/*
-+ * Copyright 2011-2014 by Emese Revfy <re.emese@gmail.com>
++ * Copyright 2011-2015 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
++ * https://github.com/ephox-gcc-plugins
+ * http://www.grsecurity.net/~ephox/overflow_plugin/
+ *
+ * Documentation:
@@ -129602,43 +133222,33 @@ index 0000000..0888f6c
+ return CANNOT_FIND_ARG;
+}
+
-+static const char *get_asm_string(const_gimple stmt)
-+{
-+ if (!stmt)
-+ return NULL;
-+ if (gimple_code(stmt) != GIMPLE_ASM)
-+ return NULL;
-+
-+ return gimple_asm_string(stmt);
-+}
-+
-+bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
++bool is_size_overflow_intentional_asm_turn_off(const gasm *stmt)
+{
+ const char *str;
+
-+ str = get_asm_string(stmt);
-+ if (!str)
++ if (!stmt)
+ return false;
++ str = gimple_asm_string(stmt);
+ return !strncmp(str, TURN_OFF_ASM_STR, sizeof(TURN_OFF_ASM_STR) - 1);
+}
+
-+bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
++bool is_size_overflow_intentional_asm_yes(const gasm *stmt)
+{
+ const char *str;
+
-+ str = get_asm_string(stmt);
-+ if (!str)
++ if (!stmt)
+ return false;
++ str = gimple_asm_string(stmt);
+ return !strncmp(str, YES_ASM_STR, sizeof(YES_ASM_STR) - 1);
+}
+
-+bool is_size_overflow_asm(const_gimple stmt)
++bool is_size_overflow_asm(const gasm *stmt)
+{
+ const char *str;
+
-+ str = get_asm_string(stmt);
-+ if (!str)
++ if (!stmt)
+ return false;
++ str = gimple_asm_string(stmt);
+ return !strncmp(str, OK_ASM_STR, sizeof(OK_ASM_STR) - 1);
+}
+
diff --git a/4.0.8/1007_linux-4.0.8.patch b/4.0.8/1007_linux-4.0.8.patch
deleted file mode 100644
index 609598e..0000000
--- a/4.0.8/1007_linux-4.0.8.patch
+++ /dev/null
@@ -1,2139 +0,0 @@
-diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
-index 750d577..f5a8ca2 100644
---- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
-+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
-@@ -1,7 +1,7 @@
- * Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
-
- Required properties:
--- compatible: should be "marvell,armada-370-neta".
-+- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
- - reg: address and length of the register set for the device.
- - interrupts: interrupt for the device
- - phy: See ethernet.txt file in the same directory.
-diff --git a/Makefile b/Makefile
-index bd76a8e..0e315d6 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 0
--SUBLEVEL = 7
-+SUBLEVEL = 8
- EXTRAVERSION =
- NAME = Hurr durr I'ma sheep
-
-diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
-index 8a322ad..a038c20 100644
---- a/arch/arm/boot/dts/armada-370-xp.dtsi
-+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
-@@ -265,7 +265,6 @@
- };
-
- eth0: ethernet@70000 {
-- compatible = "marvell,armada-370-neta";
- reg = <0x70000 0x4000>;
- interrupts = <8>;
- clocks = <&gateclk 4>;
-@@ -281,7 +280,6 @@
- };
-
- eth1: ethernet@74000 {
-- compatible = "marvell,armada-370-neta";
- reg = <0x74000 0x4000>;
- interrupts = <10>;
- clocks = <&gateclk 3>;
-diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
-index 27397f1..3773025 100644
---- a/arch/arm/boot/dts/armada-370.dtsi
-+++ b/arch/arm/boot/dts/armada-370.dtsi
-@@ -306,6 +306,14 @@
- dmacap,memset;
- };
- };
-+
-+ ethernet@70000 {
-+ compatible = "marvell,armada-370-neta";
-+ };
-+
-+ ethernet@74000 {
-+ compatible = "marvell,armada-370-neta";
-+ };
- };
- };
- };
-diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
-index 4a7cbed..1676d30 100644
---- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
-+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
-@@ -319,7 +319,7 @@
- };
-
- eth3: ethernet@34000 {
-- compatible = "marvell,armada-370-neta";
-+ compatible = "marvell,armada-xp-neta";
- reg = <0x34000 0x4000>;
- interrupts = <14>;
- clocks = <&gateclk 1>;
-diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
-index 36ce63a..d41fe88 100644
---- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
-+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
-@@ -357,7 +357,7 @@
- };
-
- eth3: ethernet@34000 {
-- compatible = "marvell,armada-370-neta";
-+ compatible = "marvell,armada-xp-neta";
- reg = <0x34000 0x4000>;
- interrupts = <14>;
- clocks = <&gateclk 1>;
-diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
-index 8291723..9ce7d5f 100644
---- a/arch/arm/boot/dts/armada-xp.dtsi
-+++ b/arch/arm/boot/dts/armada-xp.dtsi
-@@ -175,7 +175,7 @@
- };
-
- eth2: ethernet@30000 {
-- compatible = "marvell,armada-370-neta";
-+ compatible = "marvell,armada-xp-neta";
- reg = <0x30000 0x4000>;
- interrupts = <12>;
- clocks = <&gateclk 2>;
-@@ -218,6 +218,14 @@
- };
- };
-
-+ ethernet@70000 {
-+ compatible = "marvell,armada-xp-neta";
-+ };
-+
-+ ethernet@74000 {
-+ compatible = "marvell,armada-xp-neta";
-+ };
-+
- xor@f0900 {
- compatible = "marvell,orion-xor";
- reg = <0xF0900 0x100
-diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
-index 79caf79..f7db3a5 100644
---- a/arch/arm/kvm/interrupts.S
-+++ b/arch/arm/kvm/interrupts.S
-@@ -170,13 +170,9 @@ __kvm_vcpu_return:
- @ Don't trap coprocessor accesses for host kernel
- set_hstr vmexit
- set_hdcr vmexit
-- set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
-+ set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
-
- #ifdef CONFIG_VFPv3
-- @ Save floating point registers we if let guest use them.
-- tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
-- bne after_vfp_restore
--
- @ Switch VFP/NEON hardware state to the host's
- add r7, vcpu, #VCPU_VFP_GUEST
- store_vfp_state r7
-@@ -188,6 +184,8 @@ after_vfp_restore:
- @ Restore FPEXC_EN which we clobbered on entry
- pop {r2}
- VFPFMXR FPEXC, r2
-+#else
-+after_vfp_restore:
- #endif
-
- @ Reset Hyp-role
-@@ -483,7 +481,7 @@ switch_to_guest_vfp:
- push {r3-r7}
-
- @ NEON/VFP used. Turn on VFP access.
-- set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
-+ set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
-
- @ Switch VFP/NEON hardware state to the guest's
- add r7, r0, #VCPU_VFP_HOST
-diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
-index 14d4883..f6f1481 100644
---- a/arch/arm/kvm/interrupts_head.S
-+++ b/arch/arm/kvm/interrupts_head.S
-@@ -599,8 +599,13 @@ ARM_BE8(rev r6, r6 )
- .endm
-
- /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
-- * (hardware reset value is 0). Keep previous value in r2. */
--.macro set_hcptr operation, mask
-+ * (hardware reset value is 0). Keep previous value in r2.
-+ * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
-+ * VFP wasn't already enabled (always executed on vmtrap).
-+ * If a label is specified with vmexit, it is branched to if VFP wasn't
-+ * enabled.
-+ */
-+.macro set_hcptr operation, mask, label = none
- mrc p15, 4, r2, c1, c1, 2
- ldr r3, =\mask
- .if \operation == vmentry
-@@ -609,6 +614,17 @@ ARM_BE8(rev r6, r6 )
- bic r3, r2, r3 @ Don't trap defined coproc-accesses
- .endif
- mcr p15, 4, r3, c1, c1, 2
-+ .if \operation != vmentry
-+ .if \operation == vmexit
-+ tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
-+ beq 1f
-+ .endif
-+ isb
-+ .if \label != none
-+ b \label
-+ .endif
-+1:
-+ .endif
- .endm
-
- /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
-diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
-index 02fa8ef..531e922 100644
---- a/arch/arm/kvm/psci.c
-+++ b/arch/arm/kvm/psci.c
-@@ -230,10 +230,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
- case PSCI_0_2_FN64_AFFINITY_INFO:
- val = kvm_psci_vcpu_affinity_info(vcpu);
- break;
-- case PSCI_0_2_FN_MIGRATE:
-- case PSCI_0_2_FN64_MIGRATE:
-- val = PSCI_RET_NOT_SUPPORTED;
-- break;
- case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
- /*
- * Trusted OS is MP hence does not require migration
-@@ -242,10 +238,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
- */
- val = PSCI_0_2_TOS_MP;
- break;
-- case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
-- case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
-- val = PSCI_RET_NOT_SUPPORTED;
-- break;
- case PSCI_0_2_FN_SYSTEM_OFF:
- kvm_psci_system_off(vcpu);
- /*
-@@ -271,7 +263,8 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
- ret = 0;
- break;
- default:
-- return -EINVAL;
-+ val = PSCI_RET_NOT_SUPPORTED;
-+ break;
- }
-
- *vcpu_reg(vcpu, 0) = val;
-@@ -291,12 +284,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
- case KVM_PSCI_FN_CPU_ON:
- val = kvm_psci_vcpu_on(vcpu);
- break;
-- case KVM_PSCI_FN_CPU_SUSPEND:
-- case KVM_PSCI_FN_MIGRATE:
-+ default:
- val = PSCI_RET_NOT_SUPPORTED;
- break;
-- default:
-- return -EINVAL;
- }
-
- *vcpu_reg(vcpu, 0) = val;
-diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
-index d04a430..3a3f88c 100644
---- a/arch/arm/mach-imx/clk-imx6q.c
-+++ b/arch/arm/mach-imx/clk-imx6q.c
-@@ -439,7 +439,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
- clk[IMX6QDL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
- clk[IMX6QDL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
- clk[IMX6QDL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
-- clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
-+ clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
- clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
- clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
- clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
-diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
-index 6dfd4ab..301ab38 100644
---- a/arch/arm/mach-mvebu/pm-board.c
-+++ b/arch/arm/mach-mvebu/pm-board.c
-@@ -43,6 +43,9 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
- for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
- ackcmd |= BIT(pic_raw_gpios[i]);
-
-+ srcmd = cpu_to_le32(srcmd);
-+ ackcmd = cpu_to_le32(ackcmd);
-+
- /*
- * Wait a while, the PIC needs quite a bit of time between the
- * two GPIO commands.
-diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
-index 4f25a7c..a351eff 100644
---- a/arch/arm/mach-tegra/cpuidle-tegra20.c
-+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
-@@ -35,6 +35,7 @@
- #include "iomap.h"
- #include "irq.h"
- #include "pm.h"
-+#include "reset.h"
- #include "sleep.h"
-
- #ifdef CONFIG_PM_SLEEP
-@@ -71,15 +72,13 @@ static struct cpuidle_driver tegra_idle_driver = {
-
- #ifdef CONFIG_PM_SLEEP
- #ifdef CONFIG_SMP
--static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
--
- static int tegra20_reset_sleeping_cpu_1(void)
- {
- int ret = 0;
-
- tegra_pen_lock();
-
-- if (readl(pmc + PMC_SCRATCH41) == CPU_RESETTABLE)
-+ if (readb(tegra20_cpu1_resettable_status) == CPU_RESETTABLE)
- tegra20_cpu_shutdown(1);
- else
- ret = -EINVAL;
-diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
-index 71be4af..e3070fd 100644
---- a/arch/arm/mach-tegra/reset-handler.S
-+++ b/arch/arm/mach-tegra/reset-handler.S
-@@ -169,10 +169,10 @@ after_errata:
- cmp r6, #TEGRA20
- bne 1f
- /* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
-- mov32 r5, TEGRA_PMC_BASE
-- mov r0, #0
-+ mov32 r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
-+ mov r0, #CPU_NOT_RESETTABLE
- cmp r10, #0
-- strne r0, [r5, #PMC_SCRATCH41]
-+ strneb r0, [r5, #__tegra20_cpu1_resettable_status_offset]
- 1:
- #endif
-
-@@ -281,6 +281,10 @@ __tegra_cpu_reset_handler_data:
- .rept TEGRA_RESET_DATA_SIZE
- .long 0
- .endr
-+ .globl __tegra20_cpu1_resettable_status_offset
-+ .equ __tegra20_cpu1_resettable_status_offset, \
-+ . - __tegra_cpu_reset_handler_start
-+ .byte 0
- .align L1_CACHE_SHIFT
-
- ENTRY(__tegra_cpu_reset_handler_end)
-diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
-index 76a9343..29c3dec 100644
---- a/arch/arm/mach-tegra/reset.h
-+++ b/arch/arm/mach-tegra/reset.h
-@@ -35,6 +35,7 @@ extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE];
-
- void __tegra_cpu_reset_handler_start(void);
- void __tegra_cpu_reset_handler(void);
-+void __tegra20_cpu1_resettable_status_offset(void);
- void __tegra_cpu_reset_handler_end(void);
- void tegra_secondary_startup(void);
-
-@@ -47,6 +48,9 @@ void tegra_secondary_startup(void);
- (IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
- ((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
- (u32)__tegra_cpu_reset_handler_start)))
-+#define tegra20_cpu1_resettable_status \
-+ (IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
-+ (u32)__tegra20_cpu1_resettable_status_offset))
- #endif
-
- #define tegra_cpu_reset_handler_offset \
-diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
-index be4bc5f..e6b684e 100644
---- a/arch/arm/mach-tegra/sleep-tegra20.S
-+++ b/arch/arm/mach-tegra/sleep-tegra20.S
-@@ -97,9 +97,10 @@ ENDPROC(tegra20_hotplug_shutdown)
- ENTRY(tegra20_cpu_shutdown)
- cmp r0, #0
- reteq lr @ must not be called for CPU 0
-- mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
-+ mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
-+ ldr r2, =__tegra20_cpu1_resettable_status_offset
- mov r12, #CPU_RESETTABLE
-- str r12, [r1]
-+ strb r12, [r1, r2]
-
- cpu_to_halt_reg r1, r0
- ldr r3, =TEGRA_FLOW_CTRL_VIRT
-@@ -182,38 +183,41 @@ ENDPROC(tegra_pen_unlock)
- /*
- * tegra20_cpu_clear_resettable(void)
- *
-- * Called to clear the "resettable soon" flag in PMC_SCRATCH41 when
-+ * Called to clear the "resettable soon" flag in IRAM variable when
- * it is expected that the secondary CPU will be idle soon.
- */
- ENTRY(tegra20_cpu_clear_resettable)
-- mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
-+ mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
-+ ldr r2, =__tegra20_cpu1_resettable_status_offset
- mov r12, #CPU_NOT_RESETTABLE
-- str r12, [r1]
-+ strb r12, [r1, r2]
- ret lr
- ENDPROC(tegra20_cpu_clear_resettable)
-
- /*
- * tegra20_cpu_set_resettable_soon(void)
- *
-- * Called to set the "resettable soon" flag in PMC_SCRATCH41 when
-+ * Called to set the "resettable soon" flag in IRAM variable when
- * it is expected that the secondary CPU will be idle soon.
- */
- ENTRY(tegra20_cpu_set_resettable_soon)
-- mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
-+ mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
-+ ldr r2, =__tegra20_cpu1_resettable_status_offset
- mov r12, #CPU_RESETTABLE_SOON
-- str r12, [r1]
-+ strb r12, [r1, r2]
- ret lr
- ENDPROC(tegra20_cpu_set_resettable_soon)
-
- /*
- * tegra20_cpu_is_resettable_soon(void)
- *
-- * Returns true if the "resettable soon" flag in PMC_SCRATCH41 has been
-+ * Returns true if the "resettable soon" flag in IRAM variable has been
- * set because it is expected that the secondary CPU will be idle soon.
- */
- ENTRY(tegra20_cpu_is_resettable_soon)
-- mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
-- ldr r12, [r1]
-+ mov32 r1, TEGRA_IRAM_RESET_BASE_VIRT
-+ ldr r2, =__tegra20_cpu1_resettable_status_offset
-+ ldrb r12, [r1, r2]
- cmp r12, #CPU_RESETTABLE_SOON
- moveq r0, #1
- movne r0, #0
-@@ -256,9 +260,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
- mov r0, #TEGRA_FLUSH_CACHE_LOUIS
- bl tegra_disable_clean_inv_dcache
-
-- mov32 r0, TEGRA_PMC_VIRT + PMC_SCRATCH41
-+ mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT
-+ ldr r4, =__tegra20_cpu1_resettable_status_offset
- mov r3, #CPU_RESETTABLE
-- str r3, [r0]
-+ strb r3, [r0, r4]
-
- bl tegra_cpu_do_idle
-
-@@ -274,10 +279,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
-
- bl tegra_pen_lock
-
-- mov32 r3, TEGRA_PMC_VIRT
-- add r0, r3, #PMC_SCRATCH41
-+ mov32 r0, TEGRA_IRAM_RESET_BASE_VIRT
-+ ldr r4, =__tegra20_cpu1_resettable_status_offset
- mov r3, #CPU_NOT_RESETTABLE
-- str r3, [r0]
-+ strb r3, [r0, r4]
-
- bl tegra_pen_unlock
-
-diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
-index 92d46ec..0d59360 100644
---- a/arch/arm/mach-tegra/sleep.h
-+++ b/arch/arm/mach-tegra/sleep.h
-@@ -18,6 +18,7 @@
- #define __MACH_TEGRA_SLEEP_H
-
- #include "iomap.h"
-+#include "irammap.h"
-
- #define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS \
- + IO_CPU_VIRT)
-@@ -29,6 +30,9 @@
- + IO_APB_VIRT)
- #define TEGRA_PMC_VIRT (TEGRA_PMC_BASE - IO_APB_PHYS + IO_APB_VIRT)
-
-+#define TEGRA_IRAM_RESET_BASE_VIRT (IO_IRAM_VIRT + \
-+ TEGRA_IRAM_RESET_HANDLER_OFFSET)
-+
- /* PMC_SCRATCH37-39 and 41 are used for tegra_pen_lock and idle */
- #define PMC_SCRATCH37 0x130
- #define PMC_SCRATCH38 0x134
-diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
-index 9488fa5..afc96ec 100644
---- a/arch/mips/include/asm/mach-generic/spaces.h
-+++ b/arch/mips/include/asm/mach-generic/spaces.h
-@@ -94,7 +94,11 @@
- #endif
-
- #ifndef FIXADDR_TOP
-+#ifdef CONFIG_KVM_GUEST
-+#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
-+#else
- #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
- #endif
-+#endif
-
- #endif /* __ASM_MACH_GENERIC_SPACES_H */
-diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index f5e7dda..adf3886 100644
---- a/arch/mips/kvm/mips.c
-+++ b/arch/mips/kvm/mips.c
-@@ -785,7 +785,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-
- /* If nothing is dirty, don't bother messing with page tables. */
- if (is_dirty) {
-- memslot = &kvm->memslots->memslots[log->slot];
-+ memslot = id_to_memslot(kvm->memslots, log->slot);
-
- ga = memslot->base_gfn << PAGE_SHIFT;
- ga_end = ga + (memslot->npages << PAGE_SHIFT);
-diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
-index 7c4f669..3cb25fd 100644
---- a/arch/powerpc/perf/core-book3s.c
-+++ b/arch/powerpc/perf/core-book3s.c
-@@ -131,7 +131,16 @@ static void pmao_restore_workaround(bool ebb) { }
-
- static bool regs_use_siar(struct pt_regs *regs)
- {
-- return !!regs->result;
-+ /*
-+ * When we take a performance monitor exception the regs are setup
-+ * using perf_read_regs() which overloads some fields, in particular
-+ * regs->result to tell us whether to use SIAR.
-+ *
-+ * However if the regs are from another exception, eg. a syscall, then
-+ * they have not been setup using perf_read_regs() and so regs->result
-+ * is something random.
-+ */
-+ return ((TRAP(regs) == 0xf00) && regs->result);
- }
-
- /*
-diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
-index 9f73c80..49b7445 100644
---- a/arch/s390/kernel/crash_dump.c
-+++ b/arch/s390/kernel/crash_dump.c
-@@ -415,7 +415,7 @@ static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
- ptr += len;
- /* Copy lower halves of SIMD registers 0-15 */
- for (i = 0; i < 16; i++) {
-- memcpy(ptr, &vx_regs[i], 8);
-+ memcpy(ptr, &vx_regs[i].u[2], 8);
- ptr += 8;
- }
- return ptr;
-diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
-index e7bc2fd..b2b7ddf 100644
---- a/arch/s390/kvm/interrupt.c
-+++ b/arch/s390/kvm/interrupt.c
-@@ -1037,7 +1037,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
- if (sclp_has_sigpif())
- return __inject_extcall_sigpif(vcpu, src_id);
-
-- if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
-+ if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
- return -EBUSY;
- *extcall = irq->u.extcall;
- atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
-index 274a9f5..591f119f 100644
---- a/arch/sparc/kernel/ldc.c
-+++ b/arch/sparc/kernel/ldc.c
-@@ -2313,7 +2313,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
- if (len & (8UL - 1))
- return ERR_PTR(-EINVAL);
-
-- buf = kzalloc(len, GFP_KERNEL);
-+ buf = kzalloc(len, GFP_ATOMIC);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index b7d31ca..570c71d 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -177,7 +177,7 @@ config SBUS
-
- config NEED_DMA_MAP_STATE
- def_bool y
-- depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
-+ depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
-
- config NEED_SG_DMA_LENGTH
- def_bool y
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 1c0fb57..e02589d 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -583,7 +583,7 @@ struct kvm_arch {
- struct kvm_pic *vpic;
- struct kvm_ioapic *vioapic;
- struct kvm_pit *vpit;
-- int vapics_in_nmi_mode;
-+ atomic_t vapics_in_nmi_mode;
- struct mutex apic_map_lock;
- struct kvm_apic_map *apic_map;
-
-diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
-index 298781d..1406ffd 100644
---- a/arch/x86/kvm/i8254.c
-+++ b/arch/x86/kvm/i8254.c
-@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
- * LVT0 to NMI delivery. Other PIC interrupts are just sent to
- * VCPU0, and only if its LVT0 is in EXTINT mode.
- */
-- if (kvm->arch.vapics_in_nmi_mode > 0)
-+ if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_apic_nmi_wd_deliver(vcpu);
- }
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 3cb2b58..8ee4aa7 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -1224,10 +1224,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
- if (!nmi_wd_enabled) {
- apic_debug("Receive NMI setting on APIC_LVT0 "
- "for cpu %d\n", apic->vcpu->vcpu_id);
-- apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
-+ atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
- }
- } else if (nmi_wd_enabled)
-- apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
-+ atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
- }
-
- static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
-@@ -1784,6 +1784,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
- apic_update_ppr(apic);
- hrtimer_cancel(&apic->lapic_timer.timer);
- apic_update_lvtt(apic);
-+ apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
- update_divide_count(apic);
- start_apic_timer(apic);
- apic->irr_pending = true;
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index a4e62fc..1b32e29 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -511,8 +511,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
- {
- struct vcpu_svm *svm = to_svm(vcpu);
-
-- if (svm->vmcb->control.next_rip != 0)
-+ if (svm->vmcb->control.next_rip != 0) {
-+ WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
- svm->next_rip = svm->vmcb->control.next_rip;
-+ }
-
- if (!svm->next_rip) {
- if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
-@@ -4310,7 +4312,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
- break;
- }
-
-- vmcb->control.next_rip = info->next_rip;
-+ /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
-+ if (static_cpu_has(X86_FEATURE_NRIPS))
-+ vmcb->control.next_rip = info->next_rip;
- vmcb->control.exit_code = icpt_info.exit_code;
- vmexit = nested_svm_exit_handled(svm);
-
-diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
-index d939633..b33615f 100644
---- a/arch/x86/pci/acpi.c
-+++ b/arch/x86/pci/acpi.c
-@@ -81,6 +81,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
- },
- },
-+ /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
-+ /* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
-+ {
-+ .callback = set_use_crs,
-+ .ident = "Foxconn K8M890-8237A",
-+ .matches = {
-+ DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
-+ DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
-+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
-+ },
-+ },
-
- /* Now for the blacklist.. */
-
-@@ -121,8 +132,10 @@ void __init pci_acpi_crs_quirks(void)
- {
- int year;
-
-- if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
-- pci_use_crs = false;
-+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
-+ if (iomem_resource.end <= 0xffffffff)
-+ pci_use_crs = false;
-+ }
-
- dmi_check_system(pci_crs_quirks);
-
-diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index 872c577..2c867a6 100644
---- a/drivers/cpufreq/intel_pstate.c
-+++ b/drivers/cpufreq/intel_pstate.c
-@@ -534,7 +534,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
-
- val |= vid;
-
-- wrmsrl(MSR_IA32_PERF_CTL, val);
-+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
- }
-
- #define BYT_BCLK_FREQS 5
-diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
-index 5937207..3442764 100644
---- a/drivers/cpuidle/cpuidle-powernv.c
-+++ b/drivers/cpuidle/cpuidle-powernv.c
-@@ -60,6 +60,8 @@ static int nap_loop(struct cpuidle_device *dev,
- return index;
- }
-
-+/* Register for fastsleep only in oneshot mode of broadcast */
-+#ifdef CONFIG_TICK_ONESHOT
- static int fastsleep_loop(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-@@ -83,7 +85,7 @@ static int fastsleep_loop(struct cpuidle_device *dev,
-
- return index;
- }
--
-+#endif
- /*
- * States for dedicated partition case.
- */
-@@ -209,7 +211,14 @@ static int powernv_add_idle_states(void)
- powernv_states[nr_idle_states].flags = 0;
- powernv_states[nr_idle_states].target_residency = 100;
- powernv_states[nr_idle_states].enter = &nap_loop;
-- } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
-+ }
-+
-+ /*
-+ * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
-+ * within this config dependency check.
-+ */
-+#ifdef CONFIG_TICK_ONESHOT
-+ if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
- flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
- /* Add FASTSLEEP state */
- strcpy(powernv_states[nr_idle_states].name, "FastSleep");
-@@ -218,7 +227,7 @@ static int powernv_add_idle_states(void)
- powernv_states[nr_idle_states].target_residency = 300000;
- powernv_states[nr_idle_states].enter = &fastsleep_loop;
- }
--
-+#endif
- powernv_states[nr_idle_states].exit_latency =
- ((unsigned int)latency_ns[i]) / 1000;
-
-diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
-index ebbae8d..9f7333a 100644
---- a/drivers/crypto/talitos.c
-+++ b/drivers/crypto/talitos.c
-@@ -927,7 +927,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- sg_count--;
- link_tbl_ptr--;
- }
-- be16_add_cpu(&link_tbl_ptr->len, cryptlen);
-+ link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
-+ + cryptlen);
-
- /* tag end of link table */
- link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-@@ -2563,6 +2564,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
- break;
- default:
- dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
-+ kfree(t_alg);
- return ERR_PTR(-EINVAL);
- }
-
-diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
-index 48882c1..13cfbf4 100644
---- a/drivers/iommu/amd_iommu.c
-+++ b/drivers/iommu/amd_iommu.c
-@@ -1870,9 +1870,15 @@ static void free_pt_##LVL (unsigned long __pt) \
- pt = (u64 *)__pt; \
- \
- for (i = 0; i < 512; ++i) { \
-+ /* PTE present? */ \
- if (!IOMMU_PTE_PRESENT(pt[i])) \
- continue; \
- \
-+ /* Large PTE? */ \
-+ if (PM_PTE_LEVEL(pt[i]) == 0 || \
-+ PM_PTE_LEVEL(pt[i]) == 7) \
-+ continue; \
-+ \
- p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
- FN(p); \
- } \
-diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
-index bd6252b..2d1b203 100644
---- a/drivers/iommu/arm-smmu.c
-+++ b/drivers/iommu/arm-smmu.c
-@@ -1533,7 +1533,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
- return -ENODEV;
- }
-
-- if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
-+ if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
- smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
- dev_notice(smmu->dev, "\taddress translation ops\n");
- }
-diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
-index 0ad412a..d3a7bff 100644
---- a/drivers/mmc/host/sdhci.c
-+++ b/drivers/mmc/host/sdhci.c
-@@ -846,7 +846,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
- int sg_cnt;
-
- sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
-- if (sg_cnt == 0) {
-+ if (sg_cnt <= 0) {
- /*
- * This only happens when someone fed
- * us an invalid request.
-diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
-index d81fc6b..5c92fb7 100644
---- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
-+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
-@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
- int ret;
-
- /* Try to obtain pages, decreasing order if necessary */
-- gfp |= __GFP_COLD | __GFP_COMP;
-+ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
- while (order >= 0) {
- pages = alloc_pages(gfp, order);
- if (pages)
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
-index 1ec635f..196474f 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
-@@ -9323,7 +9323,8 @@ unload_error:
- * function stop ramrod is sent, since as part of this ramrod FW access
- * PTP registers.
- */
-- bnx2x_stop_ptp(bp);
-+ if (bp->flags & PTP_SUPPORTED)
-+ bnx2x_stop_ptp(bp);
-
- /* Disable HW interrupts, NAPI */
- bnx2x_netif_stop(bp, 1);
-diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
-index d20fc8e..c365765 100644
---- a/drivers/net/ethernet/intel/igb/igb_ptp.c
-+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
-@@ -540,8 +540,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
- igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
- igb->perout[i].period.tv_sec = ts.tv_sec;
- igb->perout[i].period.tv_nsec = ts.tv_nsec;
-- wr32(trgttiml, rq->perout.start.sec);
-- wr32(trgttimh, rq->perout.start.nsec);
-+ wr32(trgttimh, rq->perout.start.sec);
-+ wr32(trgttiml, rq->perout.start.nsec);
- tsauxc |= tsauxc_mask;
- tsim |= tsim_mask;
- } else {
-diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
-index 2db6532..87c7f52c 100644
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -304,6 +304,7 @@ struct mvneta_port {
- unsigned int link;
- unsigned int duplex;
- unsigned int speed;
-+ unsigned int tx_csum_limit;
- };
-
- /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
-@@ -2441,8 +2442,10 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
-
- dev->mtu = mtu;
-
-- if (!netif_running(dev))
-+ if (!netif_running(dev)) {
-+ netdev_update_features(dev);
- return 0;
-+ }
-
- /* The interface is running, so we have to force a
- * reallocation of the queues
-@@ -2471,9 +2474,26 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
- mvneta_start_dev(pp);
- mvneta_port_up(pp);
-
-+ netdev_update_features(dev);
-+
- return 0;
- }
-
-+static netdev_features_t mvneta_fix_features(struct net_device *dev,
-+ netdev_features_t features)
-+{
-+ struct mvneta_port *pp = netdev_priv(dev);
-+
-+ if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
-+ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
-+ netdev_info(dev,
-+ "Disable IP checksum for MTU greater than %dB\n",
-+ pp->tx_csum_limit);
-+ }
-+
-+ return features;
-+}
-+
- /* Get mac address */
- static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
- {
-@@ -2785,6 +2805,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
- .ndo_set_rx_mode = mvneta_set_rx_mode,
- .ndo_set_mac_address = mvneta_set_mac_addr,
- .ndo_change_mtu = mvneta_change_mtu,
-+ .ndo_fix_features = mvneta_fix_features,
- .ndo_get_stats64 = mvneta_get_stats64,
- .ndo_do_ioctl = mvneta_ioctl,
- };
-@@ -3023,6 +3044,9 @@ static int mvneta_probe(struct platform_device *pdev)
- }
- }
-
-+ if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
-+ pp->tx_csum_limit = 1600;
-+
- pp->tx_ring_size = MVNETA_MAX_TXD;
- pp->rx_ring_size = MVNETA_MAX_RXD;
-
-@@ -3095,6 +3119,7 @@ static int mvneta_remove(struct platform_device *pdev)
-
- static const struct of_device_id mvneta_match[] = {
- { .compatible = "marvell,armada-370-neta" },
-+ { .compatible = "marvell,armada-xp-neta" },
- { }
- };
- MODULE_DEVICE_TABLE(of, mvneta_match);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-index 2f1324b..f30c322 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-@@ -1971,10 +1971,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
- mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
- }
-
-- if (priv->base_tx_qpn) {
-- mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
-- priv->base_tx_qpn = 0;
-- }
- }
-
- int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-index 05ec5e1..3478c87 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-@@ -723,7 +723,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
- }
- #endif
- static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
-- int hwtstamp_rx_filter)
-+ netdev_features_t dev_features)
- {
- __wsum hw_checksum = 0;
-
-@@ -731,14 +731,8 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
-
- hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
-
-- if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
-- hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
-- /* next protocol non IPv4 or IPv6 */
-- if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-- != htons(ETH_P_IP) &&
-- ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-- != htons(ETH_P_IPV6))
-- return -1;
-+ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
-+ !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
- hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
- hdr += sizeof(struct vlan_hdr);
- }
-@@ -901,7 +895,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
-
- if (ip_summed == CHECKSUM_COMPLETE) {
- void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
-- if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
-+ if (check_csum(cqe, gro_skb, va,
-+ dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_none++;
- ring->csum_complete--;
-@@ -956,7 +951,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- }
-
- if (ip_summed == CHECKSUM_COMPLETE) {
-- if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
-+ if (check_csum(cqe, skb, skb->data, dev->features)) {
- ip_summed = CHECKSUM_NONE;
- ring->csum_complete--;
- ring->csum_none++;
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-index 8c234ec..35dd887 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-@@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- ring->size = size;
- ring->size_mask = size - 1;
- ring->stride = stride;
-+ ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
-
- tmp = size * sizeof(struct mlx4_en_tx_info);
- ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
-@@ -180,6 +181,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
- mlx4_bf_free(mdev->dev, &ring->bf);
- mlx4_qp_remove(mdev->dev, &ring->qp);
- mlx4_qp_free(mdev->dev, &ring->qp);
-+ mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
- mlx4_en_unmap_buffer(&ring->wqres.buf);
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- kfree(ring->bounce_buf);
-@@ -231,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
- MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
- }
-
-+static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
-+{
-+ return ring->prod - ring->cons > ring->full_size;
-+}
-+
- static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring, int index,
- u8 owner)
-@@ -473,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
-
- netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
-
-- /*
-- * Wakeup Tx queue if this stopped, and at least 1 packet
-- * was completed
-+ /* Wakeup Tx queue if this stopped, and ring is not full.
- */
-- if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
-+ if (netif_tx_queue_stopped(ring->tx_queue) &&
-+ !mlx4_en_is_tx_ring_full(ring)) {
- netif_tx_wake_queue(ring->tx_queue);
- ring->wake_queue++;
- }
-@@ -921,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
- skb_tx_timestamp(skb);
-
- /* Check available TXBBs And 2K spare for prefetch */
-- stop_queue = (int)(ring->prod - ring_cons) >
-- ring->size - HEADROOM - MAX_DESC_TXBBS;
-+ stop_queue = mlx4_en_is_tx_ring_full(ring);
- if (unlikely(stop_queue)) {
- netif_tx_stop_queue(ring->tx_queue);
- ring->queue_stopped++;
-@@ -991,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
- smp_rmb();
-
- ring_cons = ACCESS_ONCE(ring->cons);
-- if (unlikely(((int)(ring->prod - ring_cons)) <=
-- ring->size - HEADROOM - MAX_DESC_TXBBS)) {
-+ if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
- netif_tx_wake_queue(ring->tx_queue);
- ring->wake_queue++;
- }
-diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
-index 6fce587..0d80aed 100644
---- a/drivers/net/ethernet/mellanox/mlx4/intf.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
-@@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_interface *intf)
- mutex_lock(&intf_mutex);
-
- list_add_tail(&intf->list, &intf_list);
-- list_for_each_entry(priv, &dev_list, dev_list)
-+ list_for_each_entry(priv, &dev_list, dev_list) {
-+ if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
-+ mlx4_dbg(&priv->dev,
-+ "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
-+ intf->flags &= ~MLX4_INTFF_BONDING;
-+ }
- mlx4_add_device(intf, priv);
-+ }
-
- mutex_unlock(&intf_mutex);
-
-diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-index 8687c8d..0bf0fdd 100644
---- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-@@ -280,6 +280,7 @@ struct mlx4_en_tx_ring {
- u32 size; /* number of TXBBs */
- u32 size_mask;
- u16 stride;
-+ u32 full_size;
- u16 cqn; /* index of port CQ associated with this ring */
- u32 buf_size;
- __be32 doorbell_qpn;
-@@ -601,7 +602,6 @@ struct mlx4_en_priv {
- int vids[128];
- bool wol;
- struct device *ddev;
-- int base_tx_qpn;
- struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
- struct hwtstamp_config hwtstamp_config;
-
-diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
-index bdfe51f..d551df6 100644
---- a/drivers/net/phy/phy_device.c
-+++ b/drivers/net/phy/phy_device.c
-@@ -796,10 +796,11 @@ static int genphy_config_advert(struct phy_device *phydev)
- if (phydev->supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full)) {
- adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-- if (adv != oldadv)
-- changed = 1;
- }
-
-+ if (adv != oldadv)
-+ changed = 1;
-+
- err = phy_write(phydev, MII_CTRL1000, adv);
- if (err < 0)
- return err;
-diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
-index 71d7802..5717117 100644
---- a/drivers/s390/kvm/virtio_ccw.c
-+++ b/drivers/s390/kvm/virtio_ccw.c
-@@ -65,6 +65,7 @@ struct virtio_ccw_device {
- bool is_thinint;
- bool going_away;
- bool device_lost;
-+ unsigned int config_ready;
- void *airq_info;
- };
-
-@@ -833,8 +834,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
- if (ret)
- goto out_free;
-
-- memcpy(vcdev->config, config_area, sizeof(vcdev->config));
-- memcpy(buf, &vcdev->config[offset], len);
-+ memcpy(vcdev->config, config_area, offset + len);
-+ if (buf)
-+ memcpy(buf, &vcdev->config[offset], len);
-+ if (vcdev->config_ready < offset + len)
-+ vcdev->config_ready = offset + len;
-
- out_free:
- kfree(config_area);
-@@ -857,6 +861,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
- if (!config_area)
- goto out_free;
-
-+ /* Make sure we don't overwrite fields. */
-+ if (vcdev->config_ready < offset)
-+ virtio_ccw_get_config(vdev, 0, NULL, offset);
- memcpy(&vcdev->config[offset], buf, len);
- /* Write the config area to the host. */
- memcpy(config_area, vcdev->config, sizeof(vcdev->config));
-diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 175c995..ce3b407 100644
---- a/drivers/usb/gadget/function/f_fs.c
-+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -845,7 +845,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
- ret = ep->status;
- if (io_data->read && ret > 0) {
- ret = copy_to_iter(data, ret, &io_data->data);
-- if (unlikely(iov_iter_count(&io_data->data)))
-+ if (!ret)
- ret = -EFAULT;
- }
- }
-@@ -3433,6 +3433,7 @@ done:
- static void ffs_closed(struct ffs_data *ffs)
- {
- struct ffs_dev *ffs_obj;
-+ struct f_fs_opts *opts;
-
- ENTER();
- ffs_dev_lock();
-@@ -3446,8 +3447,13 @@ static void ffs_closed(struct ffs_data *ffs)
- if (ffs_obj->ffs_closed_callback)
- ffs_obj->ffs_closed_callback(ffs);
-
-- if (!ffs_obj->opts || ffs_obj->opts->no_configfs
-- || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
-+ if (ffs_obj->opts)
-+ opts = ffs_obj->opts;
-+ else
-+ goto done;
-+
-+ if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
-+ || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
- goto done;
-
- unregister_gadget_item(ffs_obj->opts->
-diff --git a/fs/dcache.c b/fs/dcache.c
-index 922f23e..b05c557 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -2896,17 +2896,6 @@ restart:
- vfsmnt = &mnt->mnt;
- continue;
- }
-- /*
-- * Filesystems needing to implement special "root names"
-- * should do so with ->d_dname()
-- */
-- if (IS_ROOT(dentry) &&
-- (dentry->d_name.len != 1 ||
-- dentry->d_name.name[0] != '/')) {
-- WARN(1, "Root dentry has weird name <%.*s>\n",
-- (int) dentry->d_name.len,
-- dentry->d_name.name);
-- }
- if (!error)
- error = is_mounted(vfsmnt) ? 1 : 2;
- break;
-diff --git a/fs/inode.c b/fs/inode.c
-index f00b16f..c60671d 100644
---- a/fs/inode.c
-+++ b/fs/inode.c
-@@ -1693,8 +1693,8 @@ int file_remove_suid(struct file *file)
- error = security_inode_killpriv(dentry);
- if (!error && killsuid)
- error = __remove_suid(dentry, killsuid);
-- if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-- inode->i_flags |= S_NOSEC;
-+ if (!error)
-+ inode_has_no_xattr(inode);
-
- return error;
- }
-diff --git a/fs/namespace.c b/fs/namespace.c
-index 13b0f7b..f07c769 100644
---- a/fs/namespace.c
-+++ b/fs/namespace.c
-@@ -3187,11 +3187,15 @@ bool fs_fully_visible(struct file_system_type *type)
- if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
- continue;
-
-- /* This mount is not fully visible if there are any child mounts
-- * that cover anything except for empty directories.
-+ /* This mount is not fully visible if there are any
-+ * locked child mounts that cover anything except for
-+ * empty directories.
- */
- list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
- struct inode *inode = child->mnt_mountpoint->d_inode;
-+ /* Only worry about locked mounts */
-+ if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
-+ continue;
- if (!S_ISDIR(inode->i_mode))
- goto next;
- if (inode->i_nlink > 2)
-diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
-index 2c10360..a7106ed 100644
---- a/fs/ufs/balloc.c
-+++ b/fs/ufs/balloc.c
-@@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
-
- if (ufs_fragnum(fragment) + count > uspi->s_fpg)
- ufs_error (sb, "ufs_free_fragments", "internal error");
--
-- lock_ufs(sb);
-+
-+ mutex_lock(&UFS_SB(sb)->s_lock);
-
- cgno = ufs_dtog(uspi, fragment);
- bit = ufs_dtogd(uspi, fragment);
-@@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
- if (sb->s_flags & MS_SYNCHRONOUS)
- ubh_sync_block(UCPI_UBH(ucpi));
- ufs_mark_sb_dirty(sb);
--
-- unlock_ufs(sb);
-+
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT\n");
- return;
-
- failed:
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT (FAILED)\n");
- return;
- }
-@@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
- goto failed;
- }
-
-- lock_ufs(sb);
-+ mutex_lock(&UFS_SB(sb)->s_lock);
-
- do_more:
- overflow = 0;
-@@ -211,12 +211,12 @@ do_more:
- }
-
- ufs_mark_sb_dirty(sb);
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT\n");
- return;
-
- failed_unlock:
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- failed:
- UFSD("EXIT (FAILED)\n");
- return;
-@@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- usb1 = ubh_get_usb_first(uspi);
- *err = -ENOSPC;
-
-- lock_ufs(sb);
-+ mutex_lock(&UFS_SB(sb)->s_lock);
- tmp = ufs_data_ptr_to_cpu(sb, p);
-
- if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
-@@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- "fragment %llu, tmp %llu\n",
- (unsigned long long)fragment,
- (unsigned long long)tmp);
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- return INVBLOCK;
- }
- if (fragment < UFS_I(inode)->i_lastfrag) {
- UFSD("EXIT (ALREADY ALLOCATED)\n");
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- return 0;
- }
- }
- else {
- if (tmp) {
- UFSD("EXIT (ALREADY ALLOCATED)\n");
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- return 0;
- }
- }
-@@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- * There is not enough space for user on the device
- */
- if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT (FAILED)\n");
- return 0;
- }
-@@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- ufs_clear_frags(inode, result + oldcount,
- newcount - oldcount, locked_page != NULL);
- }
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT, result %llu\n", (unsigned long long)result);
- return result;
- }
-@@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- fragment + count);
- ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
- locked_page != NULL);
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT, result %llu\n", (unsigned long long)result);
- return result;
- }
-@@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- *err = 0;
- UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
- fragment + count);
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- if (newcount < request)
- ufs_free_fragments (inode, result + newcount, request - newcount);
- ufs_free_fragments (inode, tmp, oldcount);
-@@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- return result;
- }
-
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT (FAILED)\n");
- return 0;
- }
-diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
-index 7caa016..fd0203c 100644
---- a/fs/ufs/ialloc.c
-+++ b/fs/ufs/ialloc.c
-@@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode)
-
- ino = inode->i_ino;
-
-- lock_ufs(sb);
-+ mutex_lock(&UFS_SB(sb)->s_lock);
-
- if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
- ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- return;
- }
-
-@@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode)
- bit = ufs_inotocgoff (ino);
- ucpi = ufs_load_cylinder (sb, cg);
- if (!ucpi) {
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- return;
- }
- ucg = ubh_get_ucg(UCPI_UBH(ucpi));
-@@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode)
- ubh_sync_block(UCPI_UBH(ucpi));
-
- ufs_mark_sb_dirty(sb);
-- unlock_ufs(sb);
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- UFSD("EXIT\n");
- }
-
-@@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
- sbi = UFS_SB(sb);
- uspi = sbi->s_uspi;
-
-- lock_ufs(sb);
-+ mutex_lock(&sbi->s_lock);
-
- /*
- * Try to place the inode in its parent directory
-@@ -331,21 +331,21 @@ cg_found:
- sync_dirty_buffer(bh);
- brelse(bh);
- }
-- unlock_ufs(sb);
-+ mutex_unlock(&sbi->s_lock);
-
- UFSD("allocating inode %lu\n", inode->i_ino);
- UFSD("EXIT\n");
- return inode;
-
- fail_remove_inode:
-- unlock_ufs(sb);
-+ mutex_unlock(&sbi->s_lock);
- clear_nlink(inode);
- unlock_new_inode(inode);
- iput(inode);
- UFSD("EXIT (FAILED): err %d\n", err);
- return ERR_PTR(err);
- failed:
-- unlock_ufs(sb);
-+ mutex_unlock(&sbi->s_lock);
- make_bad_inode(inode);
- iput (inode);
- UFSD("EXIT (FAILED): err %d\n", err);
-diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
-index be7d42c..2d93ab0 100644
---- a/fs/ufs/inode.c
-+++ b/fs/ufs/inode.c
-@@ -902,6 +902,9 @@ void ufs_evict_inode(struct inode * inode)
- invalidate_inode_buffers(inode);
- clear_inode(inode);
-
-- if (want_delete)
-+ if (want_delete) {
-+ lock_ufs(inode->i_sb);
- ufs_free_inode(inode);
-+ unlock_ufs(inode->i_sb);
-+ }
- }
-diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
-index fd65deb..e8ee298 100644
---- a/fs/ufs/namei.c
-+++ b/fs/ufs/namei.c
-@@ -128,12 +128,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
- if (l > sb->s_blocksize)
- goto out_notlocked;
-
-+ lock_ufs(dir->i_sb);
- inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
-- goto out_notlocked;
-+ goto out;
-
-- lock_ufs(dir->i_sb);
- if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
- /* slow symlink */
- inode->i_op = &ufs_symlink_inode_operations;
-@@ -174,7 +174,12 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
- inode_inc_link_count(inode);
- ihold(inode);
-
-- error = ufs_add_nondir(dentry, inode);
-+ error = ufs_add_link(dentry, inode);
-+ if (error) {
-+ inode_dec_link_count(inode);
-+ iput(inode);
-+ } else
-+ d_instantiate(dentry, inode);
- unlock_ufs(dir->i_sb);
- return error;
- }
-@@ -184,9 +189,13 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
- struct inode * inode;
- int err;
-
-+ lock_ufs(dir->i_sb);
-+ inode_inc_link_count(dir);
-+
- inode = ufs_new_inode(dir, S_IFDIR|mode);
-+ err = PTR_ERR(inode);
- if (IS_ERR(inode))
-- return PTR_ERR(inode);
-+ goto out_dir;
-
- inode->i_op = &ufs_dir_inode_operations;
- inode->i_fop = &ufs_dir_operations;
-@@ -194,9 +203,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
-
- inode_inc_link_count(inode);
-
-- lock_ufs(dir->i_sb);
-- inode_inc_link_count(dir);
--
- err = ufs_make_empty(inode, dir);
- if (err)
- goto out_fail;
-@@ -206,6 +212,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
- goto out_fail;
- unlock_ufs(dir->i_sb);
-
-+ unlock_new_inode(inode);
- d_instantiate(dentry, inode);
- out:
- return err;
-@@ -215,6 +222,7 @@ out_fail:
- inode_dec_link_count(inode);
- unlock_new_inode(inode);
- iput (inode);
-+out_dir:
- inode_dec_link_count(dir);
- unlock_ufs(dir->i_sb);
- goto out;
-diff --git a/fs/ufs/super.c b/fs/ufs/super.c
-index 8092d37..eb16791 100644
---- a/fs/ufs/super.c
-+++ b/fs/ufs/super.c
-@@ -694,6 +694,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
- unsigned flags;
-
- lock_ufs(sb);
-+ mutex_lock(&UFS_SB(sb)->s_lock);
-
- UFSD("ENTER\n");
-
-@@ -711,6 +712,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
- ufs_put_cstotal(sb);
-
- UFSD("EXIT\n");
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
-
- return 0;
-@@ -799,6 +801,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
- UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
-
- mutex_init(&sbi->mutex);
-+ mutex_init(&sbi->s_lock);
- spin_lock_init(&sbi->work_lock);
- INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
- /*
-@@ -1277,6 +1280,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
-
- sync_filesystem(sb);
- lock_ufs(sb);
-+ mutex_lock(&UFS_SB(sb)->s_lock);
- uspi = UFS_SB(sb)->s_uspi;
- flags = UFS_SB(sb)->s_flags;
- usb1 = ubh_get_usb_first(uspi);
-@@ -1290,6 +1294,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- new_mount_opt = 0;
- ufs_set_opt (new_mount_opt, ONERROR_LOCK);
- if (!ufs_parse_options (data, &new_mount_opt)) {
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
- return -EINVAL;
- }
-@@ -1297,12 +1302,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- new_mount_opt |= ufstype;
- } else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
- pr_err("ufstype can't be changed during remount\n");
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
- return -EINVAL;
- }
-
- if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
- UFS_SB(sb)->s_mount_opt = new_mount_opt;
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
- return 0;
- }
-@@ -1326,6 +1333,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- */
- #ifndef CONFIG_UFS_FS_WRITE
- pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
- return -EINVAL;
- #else
-@@ -1335,11 +1343,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
- ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
- pr_err("this ufstype is read-only supported\n");
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
- return -EINVAL;
- }
- if (!ufs_read_cylinder_structures(sb)) {
- pr_err("failed during remounting\n");
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
- return -EPERM;
- }
-@@ -1347,6 +1357,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- #endif
- }
- UFS_SB(sb)->s_mount_opt = new_mount_opt;
-+ mutex_unlock(&UFS_SB(sb)->s_lock);
- unlock_ufs(sb);
- return 0;
- }
-diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
-index 2a07396..cf6368d 100644
---- a/fs/ufs/ufs.h
-+++ b/fs/ufs/ufs.h
-@@ -30,6 +30,7 @@ struct ufs_sb_info {
- int work_queued; /* non-zero if the delayed work is queued */
- struct delayed_work sync_work; /* FS sync delayed work */
- spinlock_t work_lock; /* protects sync_work and work_queued */
-+ struct mutex s_lock;
- };
-
- struct ufs_inode_info {
-diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
-index 3573a81..8ba379f 100644
---- a/include/net/netns/sctp.h
-+++ b/include/net/netns/sctp.h
-@@ -31,6 +31,7 @@ struct netns_sctp {
- struct list_head addr_waitq;
- struct timer_list addr_wq_timer;
- struct list_head auto_asconf_splist;
-+ /* Lock that protects both addr_waitq and auto_asconf_splist */
- spinlock_t addr_wq_lock;
-
- /* Lock that protects the local_addr_list writers */
-diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
-index 2bb2fcf..495c87e 100644
---- a/include/net/sctp/structs.h
-+++ b/include/net/sctp/structs.h
-@@ -223,6 +223,10 @@ struct sctp_sock {
- atomic_t pd_mode;
- /* Receive to here while partial delivery is in effect. */
- struct sk_buff_head pd_lobby;
-+
-+ /* These must be the last fields, as they will skipped on copies,
-+ * like on accept and peeloff operations
-+ */
- struct list_head auto_asconf_list;
- int do_auto_asconf;
- };
-diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
-index a9a4a1b..8d423bc 100644
---- a/net/bridge/br_ioctl.c
-+++ b/net/bridge/br_ioctl.c
-@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
- return -EPERM;
-
-- spin_lock_bh(&br->lock);
- br_stp_set_bridge_priority(br, args[1]);
-- spin_unlock_bh(&br->lock);
- return 0;
-
- case BRCTL_SET_PORT_PRIORITY:
-diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index b0aee78..c08f510 100644
---- a/net/bridge/br_multicast.c
-+++ b/net/bridge/br_multicast.c
-@@ -1166,6 +1166,9 @@ static void br_multicast_add_router(struct net_bridge *br,
- struct net_bridge_port *p;
- struct hlist_node *slot = NULL;
-
-+ if (!hlist_unhashed(&port->rlist))
-+ return;
-+
- hlist_for_each_entry(p, &br->router_list, rlist) {
- if ((unsigned long) port >= (unsigned long) p)
- break;
-@@ -1193,12 +1196,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
- if (port->multicast_router != 1)
- return;
-
-- if (!hlist_unhashed(&port->rlist))
-- goto timer;
--
- br_multicast_add_router(br, port);
-
--timer:
- mod_timer(&port->multicast_router_timer,
- now + br->multicast_querier_interval);
- }
-diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
-index 4114687..7832d07 100644
---- a/net/bridge/br_stp_if.c
-+++ b/net/bridge/br_stp_if.c
-@@ -243,12 +243,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
- return true;
- }
-
--/* called under bridge lock */
-+/* Acquires and releases bridge lock */
- void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
- {
- struct net_bridge_port *p;
- int wasroot;
-
-+ spin_lock_bh(&br->lock);
- wasroot = br_is_root_bridge(br);
-
- list_for_each_entry(p, &br->port_list, list) {
-@@ -266,6 +267,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
- br_port_state_selection(br);
- if (br_is_root_bridge(br) && !wasroot)
- br_become_root_bridge(br);
-+ spin_unlock_bh(&br->lock);
- }
-
- /* called under bridge lock */
-diff --git a/net/core/neighbour.c b/net/core/neighbour.c
-index 70fe9e1..d0e5d66 100644
---- a/net/core/neighbour.c
-+++ b/net/core/neighbour.c
-@@ -971,6 +971,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
- rc = 0;
- if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
- goto out_unlock_bh;
-+ if (neigh->dead)
-+ goto out_dead;
-
- if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
- if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
-@@ -1027,6 +1029,13 @@ out_unlock_bh:
- write_unlock(&neigh->lock);
- local_bh_enable();
- return rc;
-+
-+out_dead:
-+ if (neigh->nud_state & NUD_STALE)
-+ goto out_unlock_bh;
-+ write_unlock_bh(&neigh->lock);
-+ kfree_skb(skb);
-+ return 1;
- }
- EXPORT_SYMBOL(__neigh_event_send);
-
-@@ -1090,6 +1099,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
- (old & (NUD_NOARP | NUD_PERMANENT)))
- goto out;
-+ if (neigh->dead)
-+ goto out;
-
- if (!(new & NUD_VALID)) {
- neigh_del_timer(neigh);
-@@ -1239,6 +1250,8 @@ EXPORT_SYMBOL(neigh_update);
- */
- void __neigh_set_probe_once(struct neighbour *neigh)
- {
-+ if (neigh->dead)
-+ return;
- neigh->updated = jiffies;
- if (!(neigh->nud_state & NUD_FAILED))
- return;
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index e9f9a15..1e3abb8 100644
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -4443,7 +4443,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
-
- while (order) {
- if (npages >= 1 << order) {
-- page = alloc_pages(gfp_mask |
-+ page = alloc_pages((gfp_mask & ~__GFP_WAIT) |
- __GFP_COMP |
- __GFP_NOWARN |
- __GFP_NORETRY,
-diff --git a/net/core/sock.c b/net/core/sock.c
-index 71e3e5f..c77d5d2 100644
---- a/net/core/sock.c
-+++ b/net/core/sock.c
-@@ -1895,7 +1895,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
-
- pfrag->offset = 0;
- if (SKB_FRAG_PAGE_ORDER) {
-- pfrag->page = alloc_pages(gfp | __GFP_COMP |
-+ pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
- __GFP_NOWARN | __GFP_NORETRY,
- SKB_FRAG_PAGE_ORDER);
- if (likely(pfrag->page)) {
-diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index d2e49ba..61edc49 100644
---- a/net/ipv4/af_inet.c
-+++ b/net/ipv4/af_inet.c
-@@ -228,6 +228,8 @@ int inet_listen(struct socket *sock, int backlog)
- err = 0;
- if (err)
- goto out;
-+
-+ tcp_fastopen_init_key_once(true);
- }
- err = inet_csk_listen_start(sk, backlog);
- if (err)
-diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
-index 5cd9927..d9e8ff3 100644
---- a/net/ipv4/ip_sockglue.c
-+++ b/net/ipv4/ip_sockglue.c
-@@ -432,6 +432,15 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
- kfree_skb(skb);
- }
-
-+/* For some errors we have valid addr_offset even with zero payload and
-+ * zero port. Also, addr_offset should be supported if port is set.
-+ */
-+static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
-+{
-+ return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
-+ serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
-+}
-+
- /* IPv4 supports cmsg on all imcp errors and some timestamps
- *
- * Timestamp code paths do not initialize the fields expected by cmsg:
-@@ -498,7 +507,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
-
- serr = SKB_EXT_ERR(skb);
-
-- if (sin && serr->port) {
-+ if (sin && ipv4_datagram_support_addr(serr)) {
- sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
- serr->addr_offset);
-diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
-index 995a225..d03a344 100644
---- a/net/ipv4/tcp.c
-+++ b/net/ipv4/tcp.c
-@@ -2541,10 +2541,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
-
- case TCP_FASTOPEN:
- if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
-- TCPF_LISTEN)))
-+ TCPF_LISTEN))) {
-+ tcp_fastopen_init_key_once(true);
-+
- err = fastopen_init_queue(sk, val);
-- else
-+ } else {
- err = -EINVAL;
-+ }
- break;
- case TCP_TIMESTAMP:
- if (!tp->repair)
-diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
-index ea82fd4..9c37181 100644
---- a/net/ipv4/tcp_fastopen.c
-+++ b/net/ipv4/tcp_fastopen.c
-@@ -78,8 +78,6 @@ static bool __tcp_fastopen_cookie_gen(const void *path,
- struct tcp_fastopen_context *ctx;
- bool ok = false;
-
-- tcp_fastopen_init_key_once(true);
--
- rcu_read_lock();
- ctx = rcu_dereference(tcp_fastopen_ctx);
- if (ctx) {
-diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
-index ace8dac..d174b91 100644
---- a/net/ipv6/datagram.c
-+++ b/net/ipv6/datagram.c
-@@ -325,6 +325,16 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
- kfree_skb(skb);
- }
-
-+/* For some errors we have valid addr_offset even with zero payload and
-+ * zero port. Also, addr_offset should be supported if port is set.
-+ */
-+static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
-+{
-+ return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
-+ serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
-+ serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
-+}
-+
- /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
- *
- * At one point, excluding local errors was a quick test to identify icmp/icmp6
-@@ -389,7 +399,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
-
- serr = SKB_EXT_ERR(skb);
-
-- if (sin && serr->port) {
-+ if (sin && ipv6_datagram_support_addr(serr)) {
- const unsigned char *nh = skb_network_header(skb);
- sin->sin6_family = AF_INET6;
- sin->sin6_flowinfo = 0;
-diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
-index 46214f2..2c75361 100644
---- a/net/netfilter/nft_rbtree.c
-+++ b/net/netfilter/nft_rbtree.c
-@@ -37,10 +37,11 @@ static bool nft_rbtree_lookup(const struct nft_set *set,
- {
- const struct nft_rbtree *priv = nft_set_priv(set);
- const struct nft_rbtree_elem *rbe, *interval = NULL;
-- const struct rb_node *parent = priv->root.rb_node;
-+ const struct rb_node *parent;
- int d;
-
- spin_lock_bh(&nft_rbtree_lock);
-+ parent = priv->root.rb_node;
- while (parent != NULL) {
- rbe = rb_entry(parent, struct nft_rbtree_elem, node);
-
-@@ -158,7 +159,6 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
- struct nft_rbtree_elem *rbe;
- int d;
-
-- spin_lock_bh(&nft_rbtree_lock);
- while (parent != NULL) {
- rbe = rb_entry(parent, struct nft_rbtree_elem, node);
-
-@@ -173,11 +173,9 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
- !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
- nft_data_copy(&elem->data, rbe->data);
- elem->flags = rbe->flags;
-- spin_unlock_bh(&nft_rbtree_lock);
- return 0;
- }
- }
-- spin_unlock_bh(&nft_rbtree_lock);
- return -ENOENT;
- }
-
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index f8db706..bfe5c69 100644
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -1266,16 +1266,6 @@ static void packet_sock_destruct(struct sock *sk)
- sk_refcnt_debug_dec(sk);
- }
-
--static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
--{
-- int x = atomic_read(&f->rr_cur) + 1;
--
-- if (x >= num)
-- x = 0;
--
-- return x;
--}
--
- static unsigned int fanout_demux_hash(struct packet_fanout *f,
- struct sk_buff *skb,
- unsigned int num)
-@@ -1287,13 +1277,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
- struct sk_buff *skb,
- unsigned int num)
- {
-- int cur, old;
-+ unsigned int val = atomic_inc_return(&f->rr_cur);
-
-- cur = atomic_read(&f->rr_cur);
-- while ((old = atomic_cmpxchg(&f->rr_cur, cur,
-- fanout_rr_next(f, num))) != cur)
-- cur = old;
-- return cur;
-+ return val % num;
- }
-
- static unsigned int fanout_demux_cpu(struct packet_fanout *f,
-@@ -1347,7 +1333,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
- {
- struct packet_fanout *f = pt->af_packet_priv;
-- unsigned int num = f->num_members;
-+ unsigned int num = READ_ONCE(f->num_members);
- struct packet_sock *po;
- unsigned int idx;
-
-diff --git a/net/sctp/output.c b/net/sctp/output.c
-index fc5e45b..abe7c2d 100644
---- a/net/sctp/output.c
-+++ b/net/sctp/output.c
-@@ -599,7 +599,9 @@ out:
- return err;
- no_route:
- kfree_skb(nskb);
-- IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
-+
-+ if (asoc)
-+ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
-
- /* FIXME: Returning the 'err' will effect all the associations
- * associated with a socket, although only one of the paths of the
-diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index aafe94b..4e56571 100644
---- a/net/sctp/socket.c
-+++ b/net/sctp/socket.c
-@@ -1533,8 +1533,10 @@ static void sctp_close(struct sock *sk, long timeout)
-
- /* Supposedly, no process has access to the socket, but
- * the net layers still may.
-+ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
-+ * held and that should be grabbed before socket lock.
- */
-- local_bh_disable();
-+ spin_lock_bh(&net->sctp.addr_wq_lock);
- bh_lock_sock(sk);
-
- /* Hold the sock, since sk_common_release() will put sock_put()
-@@ -1544,7 +1546,7 @@ static void sctp_close(struct sock *sk, long timeout)
- sk_common_release(sk);
-
- bh_unlock_sock(sk);
-- local_bh_enable();
-+ spin_unlock_bh(&net->sctp.addr_wq_lock);
-
- sock_put(sk);
-
-@@ -3587,6 +3589,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
- if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
- return 0;
-
-+ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
- if (val == 0 && sp->do_auto_asconf) {
- list_del(&sp->auto_asconf_list);
- sp->do_auto_asconf = 0;
-@@ -3595,6 +3598,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
- &sock_net(sk)->sctp.auto_asconf_splist);
- sp->do_auto_asconf = 1;
- }
-+ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
- return 0;
- }
-
-@@ -4128,18 +4132,28 @@ static int sctp_init_sock(struct sock *sk)
- local_bh_disable();
- percpu_counter_inc(&sctp_sockets_allocated);
- sock_prot_inuse_add(net, sk->sk_prot, 1);
-+
-+ /* Nothing can fail after this block, otherwise
-+ * sctp_destroy_sock() will be called without addr_wq_lock held
-+ */
- if (net->sctp.default_auto_asconf) {
-+ spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
- list_add_tail(&sp->auto_asconf_list,
- &net->sctp.auto_asconf_splist);
- sp->do_auto_asconf = 1;
-- } else
-+ spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
-+ } else {
- sp->do_auto_asconf = 0;
-+ }
-+
- local_bh_enable();
-
- return 0;
- }
-
--/* Cleanup any SCTP per socket resources. */
-+/* Cleanup any SCTP per socket resources. Must be called with
-+ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
-+ */
- static void sctp_destroy_sock(struct sock *sk)
- {
- struct sctp_sock *sp;
-@@ -7202,6 +7216,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
- newinet->mc_list = NULL;
- }
-
-+static inline void sctp_copy_descendant(struct sock *sk_to,
-+ const struct sock *sk_from)
-+{
-+ int ancestor_size = sizeof(struct inet_sock) +
-+ sizeof(struct sctp_sock) -
-+ offsetof(struct sctp_sock, auto_asconf_list);
-+
-+ if (sk_from->sk_family == PF_INET6)
-+ ancestor_size += sizeof(struct ipv6_pinfo);
-+
-+ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
-+}
-+
- /* Populate the fields of the newsk from the oldsk and migrate the assoc
- * and its messages to the newsk.
- */
-@@ -7216,7 +7243,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
- struct sk_buff *skb, *tmp;
- struct sctp_ulpevent *event;
- struct sctp_bind_hashbucket *head;
-- struct list_head tmplist;
-
- /* Migrate socket buffer sizes and all the socket level options to the
- * new socket.
-@@ -7224,12 +7250,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
- newsk->sk_sndbuf = oldsk->sk_sndbuf;
- newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
- /* Brute force copy old sctp opt. */
-- if (oldsp->do_auto_asconf) {
-- memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
-- inet_sk_copy_descendant(newsk, oldsk);
-- memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
-- } else
-- inet_sk_copy_descendant(newsk, oldsk);
-+ sctp_copy_descendant(newsk, oldsk);
-
- /* Restore the ep value that was overwritten with the above structure
- * copy.
-diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 4d1a541..2588e08 100644
---- a/security/selinux/hooks.c
-+++ b/security/selinux/hooks.c
-@@ -404,6 +404,7 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
- return sbsec->behavior == SECURITY_FS_USE_XATTR ||
- sbsec->behavior == SECURITY_FS_USE_TRANS ||
- sbsec->behavior == SECURITY_FS_USE_TASK ||
-+ sbsec->behavior == SECURITY_FS_USE_NATIVE ||
- /* Special handling. Genfs but also in-core setxattr handler */
- !strcmp(sb->s_type->name, "sysfs") ||
- !strcmp(sb->s_type->name, "pstore") ||
diff --git a/4.0.8/0000_README b/4.1.3/0000_README
index 919b754..8d6de14 100644
--- a/4.0.8/0000_README
+++ b/4.1.3/0000_README
@@ -2,11 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1007_linux-4.0.8.patch
-From: http://www.kernel.org
-Desc: Linux 4.0.8
-
-Patch: 4420_grsecurity-3.1-4.0.8-201507111211.patch
+Patch: 4420_grsecurity-3.1-4.1.3-201507251419.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.0.8/4420_grsecurity-3.1-4.0.8-201507111211.patch b/4.1.3/4420_grsecurity-3.1-4.1.3-201507251419.patch
index c0c4b69..723abab 100644
--- a/4.0.8/4420_grsecurity-3.1-4.0.8-201507111211.patch
+++ b/4.1.3/4420_grsecurity-3.1-4.1.3-201507251419.patch
@@ -313,10 +313,10 @@ index 74b6c6d..eac0e77 100644
A typical pattern in a Kbuild file looks like this:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 4d68ec8..9546b75 100644
+index 6726139..c825c0a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
-@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -1223,6 +1223,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
Default: 1024
@@ -330,7 +330,7 @@ index 4d68ec8..9546b75 100644
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64-bit NUMA, off otherwise.
-@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2333,6 +2340,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
@@ -341,7 +341,7 @@ index 4d68ec8..9546b75 100644
nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
-@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2631,6 +2642,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -372,11 +372,44 @@ index 4d68ec8..9546b75 100644
pcbit= [HW,ISDN]
pcd. [PARIDE]
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index c831001..1bfbbf6 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -41,6 +41,7 @@ show up in /proc/sys/kernel:
+ - kptr_restrict
+ - kstack_depth_to_print [ X86 only ]
+ - l2cr [ PPC only ]
++- modify_ldt [ X86 only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+ - modules_disabled
+ - msg_next_id [ sysv ipc ]
+@@ -391,6 +392,20 @@ This flag controls the L2 cache of G3 processor boards. If
+
+ ==============================================================
+
++modify_ldt: (X86 only)
++
++Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
++(Local Descriptor Table) may be needed to run a 16-bit or segmented code
++such as Dosemu or Wine. This is done via a system call which is not needed
++to run portable applications, and which can sometimes be abused to exploit
++some weaknesses of the architecture, opening new vulnerabilities.
++
++This sysctl allows one to increase the system's security by disabling the
++system call, or to restore compatibility with specific applications when it
++was already disabled.
++
++==============================================================
++
+ modules_disabled:
+
+ A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index 0e315d6..68f608f 100644
+index e3cdec4..56ae73d 100644
--- a/Makefile
+++ b/Makefile
-@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+@@ -299,7 +299,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
HOSTCC = gcc
HOSTCXX = g++
HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
@@ -387,7 +420,7 @@ index 0e315d6..68f608f 100644
ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
-@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
+@@ -444,8 +446,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
# Rules shared between *config targets and build targets
# Basic helpers built in scripts/
@@ -398,7 +431,7 @@ index 0e315d6..68f608f 100644
$(Q)$(MAKE) $(build)=scripts/basic
$(Q)rm -f .tmp_quiet_recordmcount
-@@ -622,6 +624,74 @@ endif
+@@ -620,6 +622,74 @@ endif
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
@@ -473,7 +506,7 @@ index 0e315d6..68f608f 100644
ifdef CONFIG_READABLE_ASM
# Disable optimizations that make assembler listings hard to read.
# reorder blocks reorders the control in the function
-@@ -714,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+@@ -712,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
else
KBUILD_CFLAGS += -g
endif
@@ -482,7 +515,7 @@ index 0e315d6..68f608f 100644
endif
ifdef CONFIG_DEBUG_INFO_DWARF4
KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
-@@ -884,7 +954,7 @@ export mod_sign_cmd
+@@ -883,7 +953,7 @@ export mod_sign_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -491,7 +524,7 @@ index 0e315d6..68f608f 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -934,6 +1004,8 @@ endif
+@@ -933,6 +1003,8 @@ endif
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -500,7 +533,7 @@ index 0e315d6..68f608f 100644
$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -943,7 +1015,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+@@ -942,7 +1014,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -509,7 +542,7 @@ index 0e315d6..68f608f 100644
$(Q)$(MAKE) $(build)=$@
define filechk_kernel.release
-@@ -986,10 +1058,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
+@@ -985,10 +1057,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
archprepare: archheaders archscripts prepare1 scripts_basic
@@ -523,7 +556,7 @@ index 0e315d6..68f608f 100644
prepare: prepare0
# Generate some files
-@@ -1103,6 +1178,8 @@ all: modules
+@@ -1096,6 +1171,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -532,7 +565,7 @@ index 0e315d6..68f608f 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1118,7 +1195,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1111,7 +1188,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -541,7 +574,7 @@ index 0e315d6..68f608f 100644
# Target to install modules
PHONY += modules_install
-@@ -1184,7 +1261,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+@@ -1177,7 +1254,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.priv signing_key.x509 x509.genkey \
extra_certificates signing_key.x509.keyid \
@@ -553,7 +586,7 @@ index 0e315d6..68f608f 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1223,7 +1303,7 @@ distclean: mrproper
+@@ -1216,7 +1296,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -562,7 +595,7 @@ index 0e315d6..68f608f 100644
-type f -print | xargs rm -f
-@@ -1389,6 +1469,8 @@ PHONY += $(module-dirs) modules
+@@ -1382,6 +1462,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -571,7 +604,7 @@ index 0e315d6..68f608f 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1529,17 +1611,21 @@ else
+@@ -1522,17 +1604,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -597,7 +630,7 @@ index 0e315d6..68f608f 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1551,11 +1637,15 @@ endif
+@@ -1544,11 +1630,15 @@ endif
$(build)=$(build-dir)
# Make sure the latest headers are built for Documentation
Documentation/: headers_install
@@ -731,10 +764,10 @@ index 2fd00b7..cfd5069 100644
for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index e51f578..16c64a3 100644
+index 36dc91a..6769cb0 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
-@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+@@ -1295,10 +1295,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
generic version except that we know how to honor ADDR_LIMIT_32BIT. */
static unsigned long
@@ -748,7 +781,7 @@ index e51f578..16c64a3 100644
info.flags = 0;
info.length = len;
-@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+@@ -1306,6 +1307,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
info.high_limit = limit;
info.align_mask = 0;
info.align_offset = 0;
@@ -756,7 +789,7 @@ index e51f578..16c64a3 100644
return vm_unmapped_area(&info);
}
-@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1338,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
merely specific addresses, but regions of memory -- perhaps
this feature should be incorporated into all ports? */
@@ -946,10 +979,10 @@ index 9d0ac09..479a962 100644
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index cf4c0c9..a87ecf5 100644
+index 45df48b..952017a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP
+@@ -1716,7 +1716,7 @@ config ALIGNMENT_TRAP
config UACCESS_WITH_MEMCPY
bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
@@ -958,7 +991,7 @@ index cf4c0c9..a87ecf5 100644
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
-@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
+@@ -1951,6 +1951,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP)
@@ -1680,10 +1713,10 @@ index 6ddbe44..b5e38b1a 100644
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
-index 674d03f..9a0bac0 100644
+index d2315ff..f60b47b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
-@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
@@ -1699,17 +1732,6 @@ index 674d03f..9a0bac0 100644
/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
-@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
- extern void elf_set_personality(const struct elf32_hdr *);
- #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
-
--struct mm_struct;
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- #ifdef CONFIG_MMU
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- struct linux_binprm;
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
@@ -1725,7 +1747,7 @@ index de53547..52b9a28 100644
(unsigned long)(dest_buf) + (size)); \
\
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
-index 53e69da..3fdc896 100644
+index 4e78065..f265b48 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
@@ -2110,10 +2132,10 @@ index 18f5a55..5072a40 100644
struct of_cpu_method {
const char *method;
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
-index 72812a1..335f4f3 100644
+index bd32ede..bd90a0b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
-@@ -77,9 +77,9 @@ struct thread_info {
+@@ -74,9 +74,9 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
@@ -2126,7 +2148,7 @@ index 72812a1..335f4f3 100644
}
#define init_thread_info (init_thread_union.thread_info)
-@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
@@ -2139,7 +2161,7 @@ index 72812a1..335f4f3 100644
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
-@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+@@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
@@ -2175,7 +2197,7 @@ index 5f833f7..76e6644 100644
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index ce0786e..a80c264 100644
+index 74b17d0..57a4bf4 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -18,6 +18,7 @@
@@ -2406,7 +2428,7 @@ index a88671c..1cc895e 100644
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
-index 672b219..4aa120a 100644
+index 570306c..c87f193 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -48,6 +48,87 @@
@@ -2576,7 +2598,7 @@ index 672b219..4aa120a 100644
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
*/
- .pushsection .fixup, "ax"
+ .pushsection .text.fixup, "ax"
.align 2
-4: str r4, [sp, #S_PC] @ retry current instruction
+4: pax_close_userland
@@ -2770,7 +2792,7 @@ index 059c3da..8e45cfc 100644
flush_icache_range((unsigned long)base + offset, offset +
length);
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
-index 0196327..50ac8895 100644
+index 3637973..cb29657 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -444,7 +444,7 @@ __enable_mmu:
@@ -2783,7 +2805,7 @@ index 0196327..50ac8895 100644
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
-index 2e11961..07f0704 100644
+index af791f4..3ff9821 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -38,12 +38,39 @@
@@ -2849,27 +2871,10 @@ index 69bda1a..755113a 100644
if (waddr != addr) {
flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 2bf1a16..d959d40 100644
+index f192a2a..1a40523 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -213,6 +213,7 @@ void machine_power_off(void)
-
- if (pm_power_off)
- pm_power_off();
-+ BUG();
- }
-
- /*
-@@ -226,7 +227,7 @@ void machine_power_off(void)
- * executing pre-reset code, and using RAM that the primary CPU's code wishes
- * to use. Implementing such co-ordination would be essentially impossible.
- */
--void machine_restart(char *cmd)
-+__noreturn void machine_restart(char *cmd)
- {
- local_irq_disable();
- smp_send_stop();
-@@ -252,8 +253,8 @@ void __show_regs(struct pt_regs *regs)
+@@ -105,8 +105,8 @@ void __show_regs(struct pt_regs *regs)
show_regs_print_info(KERN_DEFAULT);
@@ -2880,7 +2885,7 @@ index 2bf1a16..d959d40 100644
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -430,12 +431,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -283,12 +283,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -2893,7 +2898,7 @@ index 2bf1a16..d959d40 100644
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
-@@ -451,7 +446,7 @@ static struct vm_area_struct gate_vma = {
+@@ -304,7 +298,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -2902,12 +2907,12 @@ index 2bf1a16..d959d40 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -480,81 +475,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+@@ -333,91 +327,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return is_gate_vma(vma) ? "[vectors]" : NULL;
}
-/* If possible, provide a placement hint at a random offset from the
-- * stack for the signal page.
+- * stack for the sigpage and vdso pages.
- */
-static unsigned long sigpage_addr(const struct mm_struct *mm,
- unsigned int npages)
@@ -2951,6 +2956,7 @@ index 2bf1a16..d959d40 100644
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+- unsigned long npages;
- unsigned long addr;
- unsigned long hint;
- int ret = 0;
@@ -2959,10 +2965,13 @@ index 2bf1a16..d959d40 100644
- signal_page = get_signal_page();
- if (!signal_page)
- return -ENOMEM;
+-
+- npages = 1; /* for sigpage */
+- npages += vdso_total_pages;
down_write(&mm->mmap_sem);
-- hint = sigpage_addr(mm, 1);
-- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
+- hint = sigpage_addr(mm, npages);
+- addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
- if (IS_ERR_VALUE(addr)) {
- ret = addr;
- goto up_fail;
@@ -2979,6 +2988,12 @@ index 2bf1a16..d959d40 100644
-
- mm->context.sigpage = addr;
-
+- /* Unlike the sigpage, failure to install the vdso is unlikely
+- * to be fatal to the process, so no error check needed
+- * here.
+- */
+- arm_install_vdso(mm, addr + PAGE_SIZE);
+-
- up_fail:
+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
up_write(&mm->mmap_sem);
@@ -3023,8 +3038,20 @@ index ef9119f..31995a3 100644
/* Do the secure computing check first; failures should be fast. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
if (secure_computing() == -1)
+diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
+index 1a4d232..2677169 100644
+--- a/arch/arm/kernel/reboot.c
++++ b/arch/arm/kernel/reboot.c
+@@ -122,6 +122,7 @@ void machine_power_off(void)
+
+ if (pm_power_off)
+ pm_power_off();
++ while (1);
+ }
+
+ /*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index 1d60beb..4aa25d5 100644
+index 6c777e9..3d2d0ca 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
@@ -3073,7 +3100,7 @@ index 1d60beb..4aa25d5 100644
cpu_arch = CPU_ARCH_ARMv6;
else
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
-index 023ac90..0a69950 100644
+index 423663e..bfeb0ff 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -24,8 +24,6 @@
@@ -3085,7 +3112,7 @@ index 023ac90..0a69950 100644
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
-@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+@@ -385,8 +383,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
* except when the MPU has protected the vectors
* page from PL0
*/
@@ -3095,7 +3122,7 @@ index 023ac90..0a69950 100644
} else
#endif
{
-@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+@@ -592,33 +589,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
@@ -3130,7 +3157,7 @@ index 023ac90..0a69950 100644
- return page;
-}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index 86ef244..c518451 100644
+index cca5b87..68f0f73 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -76,7 +76,7 @@ enum ipi_msg_type {
@@ -3166,7 +3193,7 @@ index 7a3be1d..b00c7de 100644
start, end);
itcm_present = true;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 788e23f..6fa06a1 100644
+index 3dce1a3..60e857f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3197,7 +3224,7 @@ index 788e23f..6fa06a1 100644
if (signr)
do_exit(signr);
}
-@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
+@@ -878,7 +883,11 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
@@ -3211,7 +3238,7 @@ index 788e23f..6fa06a1 100644
/*
* on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
-index b31aa73..cc4b7a1 100644
+index 8b60fde..8d986dd 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -37,7 +37,7 @@
@@ -3223,7 +3250,7 @@ index b31aa73..cc4b7a1 100644
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
-@@ -123,6 +123,8 @@ SECTIONS
+@@ -120,6 +120,8 @@ SECTIONS
#ifdef CONFIG_DEBUG_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
@@ -3232,7 +3259,7 @@ index b31aa73..cc4b7a1 100644
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
-@@ -153,8 +155,6 @@ SECTIONS
+@@ -150,8 +152,6 @@ SECTIONS
NOTES
@@ -3242,7 +3269,7 @@ index b31aa73..cc4b7a1 100644
# ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index b652af5..60231ab 100644
+index d9631ec..b0c966c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
@@ -3254,7 +3281,7 @@ index b652af5..60231ab 100644
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
-@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
+@@ -373,7 +373,7 @@ void force_vm_exit(const cpumask_t *mask)
*/
static bool need_new_vmid_gen(struct kvm *kvm)
{
@@ -3263,7 +3290,7 @@ index b652af5..60231ab 100644
}
/**
-@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -406,7 +406,7 @@ static void update_vttbr(struct kvm *kvm)
/* First user of a new VMID generation? */
if (unlikely(kvm_next_vmid == 0)) {
@@ -3272,7 +3299,7 @@ index b652af5..60231ab 100644
kvm_next_vmid = 1;
/*
-@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -423,7 +423,7 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
@@ -3281,7 +3308,7 @@ index b652af5..60231ab 100644
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
-@@ -1087,7 +1087,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+@@ -1098,7 +1098,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
@@ -3291,7 +3318,7 @@ index b652af5..60231ab 100644
int err;
int ret, cpu;
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
-index 14a0d98..7771a7d 100644
+index 1710fd7..ec3e014 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
@@ -3319,7 +3346,7 @@ index 14a0d98..7771a7d 100644
+ENDPROC(___clear_user)
ENDPROC(__clear_user_std)
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9..73a0556 100644
--- a/arch/arm/lib/copy_from_user.S
@@ -3360,7 +3387,7 @@ index 6ee2f67..d1cce76 100644
#include <asm/asm-offsets.h>
#include <asm/cache.h>
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
-index a9d3db1..164b089 100644
+index 9648b06..19c333c 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
@@ -3385,9 +3412,9 @@ index a9d3db1..164b089 100644
+ENDPROC(___copy_to_user)
ENDPROC(__copy_to_user_std)
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
-index 7d08b43..f7ca7ea 100644
+index 1d0957e..f708846 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -57,8 +57,8 @@
@@ -3402,7 +3429,7 @@ index 7d08b43..f7ca7ea 100644
#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
-index 312d43e..21d2322 100644
+index 8044591..c9b2609 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -29,7 +29,7 @@
@@ -3437,18 +3464,10 @@ index 3e58d71..029817c 100644
/* See rational for this in __copy_to_user() above. */
if (n < 64)
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
-index 582ef2d..d314e82 100644
+index 7d23ce0..5ef383a 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
-@@ -18,6 +18,7 @@
- #include <linux/syscore_ops.h>
- #include <linux/cpu_pm.h>
- #include <linux/io.h>
-+#include <linux/irq.h>
- #include <linux/irqchip/arm-gic.h>
- #include <linux/err.h>
- #include <linux/regulator/machine.h>
-@@ -635,8 +636,10 @@ void __init exynos_pm_init(void)
+@@ -738,8 +738,10 @@ void __init exynos_pm_init(void)
tmp |= pm_data->wake_disable_mask;
pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
@@ -3544,10 +3563,10 @@ index 5305ec7..6d74045 100644
#include <asm/smp_scu.h>
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
-index f961c46..4a453dc 100644
+index 3b56722..33ac281 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
-@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
+@@ -330,7 +330,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
return NOTIFY_OK;
}
@@ -3557,7 +3576,7 @@ index f961c46..4a453dc 100644
};
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
-index be9541e..821805f 100644
+index 166b18f..f985f04 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
@@ -3598,10 +3617,10 @@ index 78c02b3..c94109a 100644
struct omap_device *omap_device_alloc(struct platform_device *pdev,
struct omap_hwmod **ohs, int oh_cnt);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index 355b089..2c9d7c3 100644
+index 752969f..a34b446 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
-@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
+@@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
int (*init_clkdm)(struct omap_hwmod *oh);
void (*update_context_lost)(struct omap_hwmod *oh);
int (*get_context_lost)(struct omap_hwmod *oh);
@@ -3662,10 +3681,10 @@ index ff0a68c..b312aa0 100644
sizeof(struct omap_wd_timer_platform_data));
WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
-index a351eff..87baad9 100644
+index 7469347..1ecc350 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
-@@ -178,7 +178,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
+@@ -177,7 +177,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
bool entered_lp2 = false;
if (tegra_pending_sgi())
@@ -3675,7 +3694,7 @@ index a351eff..87baad9 100644
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
-index ab95f53..4b977a7 100644
+index 3b9098d..15b390f 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -20,6 +20,7 @@
@@ -3729,7 +3748,7 @@ index 52d768f..5f93180 100644
#include "common.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
-index 9b4f29e..bbf3bfa 100644
+index b4f92b9..ffefea9 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -446,6 +446,7 @@ config CPU_32v5
@@ -3767,7 +3786,7 @@ index 9b4f29e..bbf3bfa 100644
If all of the binaries and libraries which run on your platform
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
-index 2c0c541..4585df9 100644
+index 9769f1e..16aaa55 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -216,10 +216,12 @@ union offset_union {
@@ -3832,7 +3851,7 @@ index 2c0c541..4585df9 100644
goto fault; \
} while (0)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
-index 8f15f70..d599a2b 100644
+index e309c8f..f8965e8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -43,7 +43,7 @@ struct l2c_init_data {
@@ -4143,10 +4162,10 @@ index cf08bdf..772656c 100644
unsigned long search_exception_table(unsigned long addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
-index 1609b02..def0785 100644
+index be92fa0..5252d7e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
-@@ -755,7 +755,46 @@ void free_tcmmem(void)
+@@ -709,7 +709,46 @@ void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
@@ -4210,7 +4229,7 @@ index d1e5ad7..84dcbf2 100644
return __arm_ioremap_caller(phys_addr, size, mtype,
__builtin_return_address(0));
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index 5e85ed3..b10a7ed 100644
+index 407dc78..047ce9d 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -4287,7 +4306,7 @@ index 5e85ed3..b10a7ed 100644
addr = vm_unmapped_area(&info);
/*
-@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -183,14 +193,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
@@ -4295,10 +4314,8 @@ index 5e85ed3..b10a7ed 100644
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
- /* 8 bits of randomness in 20 address space bits */
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE))
-@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -4705,7 +4722,7 @@ index 7186382..0c145cf 100644
}
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
-index f412b53..fc89433 100644
+index e0e2358..a4ee460 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -20,6 +20,7 @@
@@ -4716,7 +4733,7 @@ index f412b53..fc89433 100644
#include "bpf_jit_32.h"
-@@ -71,7 +72,11 @@ struct jit_ctx {
+@@ -72,34 +73,58 @@ struct jit_ctx {
#endif
};
@@ -4726,9 +4743,62 @@ index f412b53..fc89433 100644
int bpf_jit_enable __read_mostly;
+#endif
- static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
++static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
++ unsigned int size)
++{
++ void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
++
++ if (!ptr)
++ return -EFAULT;
++ memcpy(ret, ptr, size);
++ return 0;
++}
++
++static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
+ {
+ u8 ret;
+ int err;
+
+- err = skb_copy_bits(skb, offset, &ret, 1);
++ if (offset < 0)
++ err = call_neg_helper(skb, offset, &ret, 1);
++ else
++ err = skb_copy_bits(skb, offset, &ret, 1);
+
+ return (u64)err << 32 | ret;
+ }
+
+-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
++static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
+ {
+ u16 ret;
+ int err;
+
+- err = skb_copy_bits(skb, offset, &ret, 2);
++ if (offset < 0)
++ err = call_neg_helper(skb, offset, &ret, 2);
++ else
++ err = skb_copy_bits(skb, offset, &ret, 2);
+
+ return (u64)err << 32 | ntohs(ret);
+ }
+
+-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
++static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
-@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+ u32 ret;
+ int err;
+
+- err = skb_copy_bits(skb, offset, &ret, 4);
++ if (offset < 0)
++ err = call_neg_helper(skb, offset, &ret, 4);
++ else
++ err = skb_copy_bits(skb, offset, &ret, 4);
+
+ return (u64)err << 32 | ntohl(ret);
+ }
+@@ -179,8 +204,10 @@ static void jit_fill_hole(void *area, unsigned int size)
{
u32 *ptr;
/* We are guaranteed to have aligned memory. */
@@ -4739,6 +4809,57 @@ index f412b53..fc89433 100644
}
static void build_prologue(struct jit_ctx *ctx)
+@@ -536,9 +563,6 @@ static int build_body(struct jit_ctx *ctx)
+ case BPF_LD | BPF_B | BPF_ABS:
+ load_order = 0;
+ load:
+- /* the interpreter will deal with the negative K */
+- if ((int)k < 0)
+- return -ENOTSUPP;
+ emit_mov_i(r_off, k, ctx);
+ load_common:
+ ctx->seen |= SEEN_DATA | SEEN_CALL;
+@@ -547,12 +571,24 @@ load_common:
+ emit(ARM_SUB_I(r_scratch, r_skb_hl,
+ 1 << load_order), ctx);
+ emit(ARM_CMP_R(r_scratch, r_off), ctx);
+- condt = ARM_COND_HS;
++ condt = ARM_COND_GE;
+ } else {
+ emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
+ condt = ARM_COND_HI;
+ }
+
++ /*
++ * test for negative offset, only if we are
++ * currently scheduled to take the fast
++ * path. this will update the flags so that
++ * the slowpath instruction are ignored if the
++ * offset is negative.
++ *
++ * for loard_order == 0 the HI condition will
++ * make loads at offset 0 take the slow path too.
++ */
++ _emit(condt, ARM_CMP_I(r_off, 0), ctx);
++
+ _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+ ctx);
+
+@@ -860,9 +896,11 @@ b_epilogue:
+ off = offsetof(struct sk_buff, vlan_tci);
+ emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
+- OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
+- else
+- OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
++ OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
++ else {
++ OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
++ OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
++ }
+ break;
+ case BPF_ANC | SKF_AD_QUEUE:
+ ctx->seen |= SEEN_SKB;
diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
index 5b217f4..c23f40e 100644
--- a/arch/arm/plat-iop/setup.c
@@ -4786,7 +4907,7 @@ index 7047051..44e8675 100644
#endif
#endif
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
-index a5abb00..9cbca9a 100644
+index 71f19c4..2b13cfe 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -44,7 +44,7 @@
@@ -4824,7 +4945,7 @@ index 4fde8c1..441f84f 100644
default:
BUILD_BUG();
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
-index e20df38..027ede3 100644
+index 7642056..bffc904 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -4836,9 +4957,9 @@ index e20df38..027ede3 100644
+ pud_populate(mm, pud, pmd);
+}
+
- #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+ #endif /* CONFIG_PGTABLE_LEVELS > 2 */
- #if CONFIG_ARM64_PGTABLE_LEVELS > 3
+ #if CONFIG_PGTABLE_LEVELS > 3
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07e1ba44..ec8cbbb 100644
--- a/arch/arm64/include/asm/uaccess.h
@@ -4881,14 +5002,14 @@ index c3a58a1..78fbf54 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
-index d232888..87c8df1 100644
+index 0388ece..87c8df1 100644
--- a/arch/avr32/include/asm/elf.h
+++ b/arch/avr32/include/asm/elf.h
@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
--#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#ifdef CONFIG_PAX_ASLR
@@ -5114,10 +5235,10 @@ index 69952c18..4fa2908 100644
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
-index 074e52b..76afdac 100644
+index 76d25b2..d3793a0f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
-@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
+@@ -541,6 +541,7 @@ source "drivers/sn/Kconfig"
config KEXEC
bool "kexec system call"
depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
@@ -5208,7 +5329,7 @@ index 5a83c5c..4d7f553 100644
/* IA-64 relocations: */
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
-index 5767cdf..7462574 100644
+index f5e70e9..624fad5 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
@@ -5238,7 +5359,7 @@ index 5767cdf..7462574 100644
{
return quicklist_alloc(0, GFP_KERNEL, NULL);
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
-index 7b6f880..ac8e008 100644
+index 9f3ed9e..c99b418 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -12,7 +12,7 @@
@@ -5565,7 +5686,7 @@ index 52b7604b..455cb85 100644
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
-index 6b33457..88b5124 100644
+index a9b65cf..49ae1cf 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
@@ -5698,10 +5819,10 @@ index 4efe96a..60e8699 100644
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index 1a313c4..f27b613 100644
+index f501665..b107753 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
+@@ -2585,6 +2585,7 @@ source "kernel/Kconfig.preempt"
config KEXEC
bool "Kexec system call"
@@ -6358,10 +6479,10 @@ index b4db69f..8f3b093 100644
#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
-index 694925a..990fa62 100644
+index f19e890..a4f8177 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
-@@ -410,15 +410,18 @@ extern const char *__elf_platform;
+@@ -417,6 +417,13 @@ extern const char *__elf_platform;
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
@@ -6375,15 +6496,6 @@ index 694925a..990fa62 100644
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp);
-
--struct mm_struct;
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- struct arch_elf_state {
- int fp_abi;
- int interp_fp_abi;
diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
index c1f6afa..38cc6e9 100644
--- a/arch/mips/include/asm/exec.h
@@ -6501,10 +6613,10 @@ index 8feaed6..1bd8a64 100644
/**
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
-index 154b70a..426ae3d 100644
+index 89dd7fe..a123c97 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
-@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+@@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
@@ -6530,7 +6642,7 @@ index b336037..5b874cc 100644
/*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index f8f809f..b5f3fa4 100644
+index 819af9d..439839d 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,9 @@
@@ -6544,10 +6656,10 @@ index f8f809f..b5f3fa4 100644
struct vm_area_struct;
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
-index 55ed660..3dc9422 100644
+index 9c0014e..5101ef5 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
-@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
+@@ -100,6 +100,9 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SECCOMP 4 /* secure computing */
#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
@@ -6557,7 +6669,7 @@ index 55ed660..3dc9422 100644
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_NOHZ 19 /* in adaptive nohz mode */
-@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
+@@ -135,14 +138,16 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_USEDMSA (1<<TIF_USEDMSA)
#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
@@ -6576,7 +6688,7 @@ index 55ed660..3dc9422 100644
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
-@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
+@@ -150,7 +155,7 @@ static inline struct thread_info *current_thread_info(void)
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
_TIF_WORK_SYSCALL_EXIT | \
@@ -6660,7 +6772,7 @@ index 44a1f79..2bd6aa3 100644
void __init gt641xx_irq_init(void)
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
-index be15e52..a089cc4 100644
+index 3c8a18a..b4929b6 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
@@ -6715,10 +6827,10 @@ index 0614717..002fa43 100644
/* Run the generated entry code */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
-index bf85cc1..b365c61 100644
+index f2975d4..f61d355 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
-@@ -535,18 +535,6 @@ out:
+@@ -541,18 +541,6 @@ out:
return pc;
}
@@ -6738,10 +6850,10 @@ index bf85cc1..b365c61 100644
{
struct pt_regs *regs;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
-index 5104528..950bbdc 100644
+index e933a30..0d02625 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
-@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -785,6 +785,10 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
@@ -6752,7 +6864,7 @@ index 5104528..950bbdc 100644
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
-@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+@@ -803,6 +807,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
tracehook_report_syscall_entry(regs))
ret = -1;
@@ -6764,38 +6876,6 @@ index 5104528..950bbdc 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
-diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
-index 07fc524..b9d7f28 100644
---- a/arch/mips/kernel/reset.c
-+++ b/arch/mips/kernel/reset.c
-@@ -13,6 +13,7 @@
- #include <linux/reboot.h>
-
- #include <asm/reboot.h>
-+#include <asm/bug.h>
-
- /*
- * Urgs ... Too many MIPS machines to handle this in a generic way.
-@@ -29,16 +30,19 @@ void machine_restart(char *command)
- {
- if (_machine_restart)
- _machine_restart(command);
-+ BUG();
- }
-
- void machine_halt(void)
- {
- if (_machine_halt)
- _machine_halt();
-+ BUG();
- }
-
- void machine_power_off(void)
- {
- if (pm_power_off)
- pm_power_off();
-+ BUG();
- }
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 2242bdd..b284048 100644
--- a/arch/mips/kernel/sync-r4k.c
@@ -6866,7 +6946,7 @@ index 2242bdd..b284048 100644
}
/* Arrange for an interrupt in a short while */
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index 33984c0..666a96d 100644
+index d2d1c19..3e21d8d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
@@ -6890,10 +6970,10 @@ index 33984c0..666a96d 100644
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index adf3886..ce8f002 100644
+index 52f205a..335927c 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
-@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+@@ -1013,7 +1013,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return r;
}
@@ -6946,7 +7026,7 @@ index 7ff8637..6004edb 100644
tsk->thread.error_code = write;
if (show_unhandled_signals &&
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
-index f1baadd..5472dca 100644
+index 5c81fdd..db158d3 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
@@ -6986,7 +7066,7 @@ index f1baadd..5472dca 100644
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -160,45 +166,34 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
@@ -6994,10 +7074,8 @@ index f1baadd..5472dca 100644
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
- if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
- random_factor = random_factor << PAGE_SHIFT;
-@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -7251,7 +7329,7 @@ index 78c9fd3..42fa66a 100644
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
-index d174372..f27fe5c 100644
+index 3a08eae..08fef28 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
@@ -7266,7 +7344,24 @@ index d174372..f27fe5c 100644
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+@@ -72,7 +77,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+
+ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ {
+- if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
++ if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
+ /*
+ * This is the permanent pmd attached to the pgd;
+ * cannot free it.
+@@ -81,6 +86,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ */
+ mm_inc_nr_pmds(mm);
+ return;
++ }
+ free_pages((unsigned long)pmd, PMD_ORDER);
+ }
+
+@@ -96,6 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
@@ -7275,10 +7370,10 @@ index d174372..f27fe5c 100644
#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
-index 15207b9..3209e65 100644
+index 0a18375..d613939 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
-@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+@@ -213,6 +213,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
@@ -7677,10 +7772,10 @@ index e5120e6..8ddb5cc 100644
/*
* If for any reason at all we couldn't handle the fault, make
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 22b0940..309f790 100644
+index 190cc48..48439ce 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
+@@ -413,6 +413,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
@@ -8215,7 +8310,7 @@ index a3bf5be..e03ba81 100644
#define smp_load_acquire(p) \
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
-index 34a05a1..a1f2c67 100644
+index 0dc42c5..b80a3a1 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -4,6 +4,7 @@
@@ -8236,7 +8331,7 @@ index 34a05a1..a1f2c67 100644
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
-index 57d289a..b36c98c 100644
+index ee46ffe..b36c98c 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -30,6 +30,18 @@
@@ -8258,17 +8353,6 @@ index 57d289a..b36c98c 100644
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
/*
-@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- (0x7ff >> (PAGE_SHIFT - 12)) : \
- (0x3ffff >> (PAGE_SHIFT - 12)))
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
--
- #ifdef CONFIG_SPU_BASE
- /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
- #define NT_SPU 1
diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
index 8196e9c..d83a9f3 100644
--- a/arch/powerpc/include/asm/exec.h
@@ -8470,7 +8554,7 @@ index 4b0be20..c15a27d 100644
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
-index 9835ac4..900430f 100644
+index 11a3863..108f194 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -2,6 +2,7 @@
@@ -8506,7 +8590,7 @@ index af56b5c..f86f3f6 100644
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
-index d607df5..08dc9ae 100644
+index 825663c..f9e9134 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -51,7 +51,7 @@ struct smp_ops_t {
@@ -8586,10 +8670,10 @@ index 4dbe072..b803275 100644
: "r"(&rw->lock)
: "cr0", "xer", "memory");
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
-index 7248979..80b75de 100644
+index 7efee4a..48d47cc 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
+@@ -101,6 +101,8 @@ static inline struct thread_info *current_thread_info(void)
#if defined(CONFIG_PPC64)
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
@@ -8598,7 +8682,7 @@ index 7248979..80b75de 100644
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
+@@ -119,9 +121,10 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@@ -8791,7 +8875,7 @@ index a0c071d..49cdc7f 100644
static inline unsigned long clear_user(void __user *addr, unsigned long size)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
-index 502cf69..53936a1 100644
+index c1ebbda..fd8a98d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
@@ -8917,7 +9001,7 @@ index c94d2e0..992a9ce 100644
sechdrs, module);
#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index b4cc7be..1fe8bb3 100644
+index febb50d..bb10020 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
@@ -8931,7 +9015,7 @@ index b4cc7be..1fe8bb3 100644
#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
-@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
@@ -8944,7 +9028,7 @@ index b4cc7be..1fe8bb3 100644
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
}
-@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
lr = regs->link;
@@ -8953,7 +9037,7 @@ index b4cc7be..1fe8bb3 100644
regs->trap, (void *)regs->nip, (void *)lr);
firstframe = 1;
}
-@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
+@@ -1613,49 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
@@ -9157,10 +9241,10 @@ index 305eb0d..accc5b40 100644
rc = vdso_base;
goto fail_mmapsem;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
-index 27c0fac..6ec4a32 100644
+index ac3ddf1..9a54c76 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
-@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
+@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
@@ -9305,44 +9389,21 @@ index b396868..3eb6b9f 100644
goto bail;
}
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
-index cb8bdbe..cde4bc7 100644
+index 0f0502e..bc3e7a3 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
-@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
- return sysctl_legacy_va_layout;
- }
-
--static unsigned long mmap_rnd(void)
-+static unsigned long mmap_rnd(struct mm_struct *mm)
+@@ -86,6 +86,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
- unsigned long rnd = 0;
+ unsigned long random_factor = 0UL;
+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
- if (current->flags & PF_RANDOMIZE) {
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
-@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
- return rnd << PAGE_SHIFT;
- }
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
--static inline unsigned long mmap_base(void)
-+static inline unsigned long mmap_base(struct mm_struct *mm)
- {
- unsigned long gap = rlimit(RLIMIT_STACK);
-
-@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
-- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
- }
-
- /*
-@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -95,9 +99,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
*/
if (mmap_is_legacy()) {
mm->mmap_base = TASK_UNMAPPED_BASE;
@@ -9354,8 +9415,7 @@ index cb8bdbe..cde4bc7 100644
+
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
-- mm->mmap_base = mmap_base();
-+ mm->mmap_base = mmap_base(mm);
+ mm->mmap_base = mmap_base(random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
@@ -9420,10 +9480,10 @@ index d966bbe..372124a 100644
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long offset = address - vma->vm_start;
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
-index fa934fe..c296056 100644
+index adbe380..adb7516 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
-@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+@@ -317,4 +317,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
@@ -9468,36 +9528,23 @@ index 4d7ccac..d03d0ad 100644
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
-index c9c875d..b4b0e4c 100644
+index 3ad48f2..64cc6f3 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
-@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
+@@ -163,6 +163,13 @@ extern unsigned int vdso_enabled;
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
--extern unsigned long randomize_et_dyn(void);
--#define ELF_ET_DYN_BASE randomize_et_dyn()
-+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
-+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
+
+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
+#endif
-
++
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
-@@ -225,9 +231,6 @@ struct linux_binprm;
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- int arch_setup_additional_pages(struct linux_binprm *, int);
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
-
- #endif
diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
index c4a93d6..4d2a9b4 100644
--- a/arch/s390/include/asm/exec.h
@@ -9511,7 +9558,7 @@ index c4a93d6..4d2a9b4 100644
#endif /* __ASM_EXEC_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
-index cd4c68e..6764641 100644
+index d64a7a6..0830329 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
@@ -9551,10 +9598,10 @@ index cd4c68e..6764641 100644
return n;
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
-index 2ca9586..55682a9 100644
+index 0c1a679..e1df357 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
-@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+@@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
/* Increase core size by size of got & plt and set start
offsets for got and plt. */
@@ -9571,7 +9618,7 @@ index 2ca9586..55682a9 100644
return 0;
}
-@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
if (info->got_initialized == 0) {
Elf_Addr *gotent;
@@ -9580,7 +9627,7 @@ index 2ca9586..55682a9 100644
info->got_offset;
*gotent = val;
info->got_initialized = 1;
-@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
rc = apply_rela_bits(loc, val, 0, 64, 0);
else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) {
@@ -9589,16 +9636,16 @@ index 2ca9586..55682a9 100644
rc = apply_rela_bits(loc, val, 1, 32, 1);
}
break;
-@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
unsigned int *ip;
- ip = me->module_core + me->arch.plt_offset +
+ ip = me->module_core_rx + me->arch.plt_offset +
info->plt_offset;
- #ifndef CONFIG_64BIT
- ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
-@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+ ip[1] = 0x100a0004;
+@@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val - loc + 0xffffUL < 0x1ffffeUL) ||
(r_type == R_390_PLT32DBL &&
val - loc + 0xffffffffULL < 0x1fffffffeULL)))
@@ -9607,7 +9654,7 @@ index 2ca9586..55682a9 100644
me->arch.plt_offset +
info->plt_offset;
val += rela->r_addend - loc;
-@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
case R_390_GOTOFF64: /* 64 bit offset to GOT. */
val = val + rela->r_addend -
@@ -9616,7 +9663,7 @@ index 2ca9586..55682a9 100644
if (r_type == R_390_GOTOFF16)
rc = apply_rela_bits(loc, val, 0, 16, 0);
else if (r_type == R_390_GOTOFF32)
-@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+@@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
@@ -9626,10 +9673,10 @@ index 2ca9586..55682a9 100644
if (r_type == R_390_GOTPC)
rc = apply_rela_bits(loc, val, 1, 32, 0);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index 13fc097..84d375f 100644
+index dc5edc2..7d34ae3 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
-@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -200,27 +200,3 @@ unsigned long get_wchan(struct task_struct *p)
}
return 0;
}
@@ -9658,48 +9705,24 @@ index 13fc097..84d375f 100644
- return (ret > mm->brk) ? ret : mm->brk;
-}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
-index 179a2c2..4ba9137 100644
+index 6e552af..3e608a1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
-@@ -62,6 +62,12 @@ static inline int mmap_is_legacy(void)
-
- static unsigned long mmap_rnd(void)
+@@ -239,6 +239,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
{
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
-+ return 0;
-+#endif
-+
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
- if (is_32bit_task())
-@@ -204,9 +210,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
-+
+ unsigned long random_factor = 0UL;
+
+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-@@ -279,9 +297,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+@@ -248,9 +252,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
*/
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
+ mm->mmap_base = mmap_base_legacy(random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
@@ -9708,7 +9731,7 @@ index 179a2c2..4ba9137 100644
+
mm->get_unmapped_area = s390_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+ if (mm->pax_flags & MF_PAX_RANDMMAP)
@@ -10329,10 +10352,10 @@ index 9689176..63c18ea 100644
unsigned long mask, tmp1, tmp2, result;
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
-index fd7bd0a..2e2fa7a 100644
+index 229475f..2fca9163 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
-@@ -47,6 +47,7 @@ struct thread_info {
+@@ -48,6 +48,7 @@ struct thread_info {
struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
unsigned long rwbuf_stkptrs[NSWINS];
unsigned long w_saved;
@@ -10341,10 +10364,10 @@ index fd7bd0a..2e2fa7a 100644
/*
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
-index ff45516..73001ab 100644
+index bde5982..9cbb56d 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
-@@ -61,6 +61,8 @@ struct thread_info {
+@@ -59,6 +59,8 @@ struct thread_info {
struct pt_regs *kern_una_regs;
unsigned int kern_una_insn;
@@ -10353,7 +10376,7 @@ index ff45516..73001ab 100644
unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
__attribute__ ((aligned(64)));
};
-@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
+@@ -180,12 +182,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
/* flag bit 4 is available */
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
@@ -10368,7 +10391,7 @@ index ff45516..73001ab 100644
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
* an immediate value in instructions such as andcc.
-@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
+@@ -205,12 +208,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
@@ -10654,10 +10677,10 @@ index 9ddc492..27a5619 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
-index 61139d9..c1a5f28 100644
+index 19cd08d..ff21e99 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
-@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+@@ -891,7 +891,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
return;
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -10666,7 +10689,7 @@ index 61139d9..c1a5f28 100644
#endif
this_cpu = get_cpu();
-@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+@@ -915,7 +915,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -10675,7 +10698,7 @@ index 61139d9..c1a5f28 100644
#endif
}
}
-@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+@@ -934,7 +934,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -10684,7 +10707,7 @@ index 61139d9..c1a5f28 100644
#endif
data0 = 0;
pg_addr = page_address(page);
-@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+@@ -951,7 +951,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -10920,7 +10943,7 @@ index bb00089..e0ea580 100644
2:
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
-index 6fd386c5..6907d81 100644
+index 4f21df7..0a374da 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
@@ -10953,7 +10976,7 @@ index 6fd386c5..6907d81 100644
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index 0e69974..0c15a6e 100644
+index d21cd62..00a4a17 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
@@ -12270,10 +12293,10 @@ index 4242eab..9ae6360 100644
pte_t *huge_pte_alloc(struct mm_struct *mm,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
-index 4ca0d6b..e89bca1 100644
+index 559cb74..9e5f097 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
-@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+@@ -187,9 +187,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -12285,7 +12308,7 @@ index 4ca0d6b..e89bca1 100644
#endif
#endif
-@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
+@@ -197,7 +197,7 @@ inline void flush_dcache_page_impl(struct page *page)
{
BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -12294,7 +12317,7 @@ index 4ca0d6b..e89bca1 100644
#endif
#ifdef DCACHE_ALIASING_POSSIBLE
-@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
+@@ -469,10 +469,10 @@ void mmu_info(struct seq_file *m)
#ifdef CONFIG_DEBUG_DCFLUSH
seq_printf(m, "DCPageFlushes\t: %d\n",
@@ -12308,10 +12331,10 @@ index 4ca0d6b..e89bca1 100644
#endif /* CONFIG_DEBUG_DCFLUSH */
}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
-index 7cca418..53fc030 100644
+index a07e31b..85c9003 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
-@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
+@@ -198,6 +198,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call"
@@ -12395,7 +12418,7 @@ index 8416240..a012fb7 100644
/*
diff --git a/arch/um/Makefile b/arch/um/Makefile
-index e4b1a96..16162f8 100644
+index 17d4460..9d74338e3de4 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
@@ -12407,7 +12430,7 @@ index e4b1a96..16162f8 100644
+endif
+
#This will adjust *FLAGS accordingly to the platform.
- include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+ include $(ARCH_DIR)/Makefile-os-$(OS)
diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
index 19e1bdd..3665b77 100644
@@ -12468,10 +12491,10 @@ index 2b4274e..754fe06 100644
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
-index f17bca8..48adb87 100644
+index 68b9119..f72353c 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
-@@ -356,22 +356,6 @@ int singlestepping(void * t)
+@@ -345,22 +345,6 @@ int singlestepping(void * t)
return 2;
}
@@ -12512,10 +12535,19 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 570c71d..992da93 100644
+index 226d569..d420edc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -132,7 +132,7 @@ config X86
+@@ -32,7 +32,7 @@ config X86
+ select HAVE_AOUT if X86_32
+ select HAVE_UNSTABLE_SCHED_CLOCK
+ select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
+- select ARCH_SUPPORTS_INT128 if X86_64
++ select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW
+ select HAVE_IDE
+ select HAVE_OPROFILE
+ select HAVE_PCSPKR_PLATFORM
+@@ -134,7 +134,7 @@ config X86
select RTC_LIB
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
@@ -12533,7 +12565,7 @@ index 570c71d..992da93 100644
config ARCH_HWEIGHT_CFLAGS
string
-@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
+@@ -638,6 +638,7 @@ config SCHED_OMIT_FRAME_POINTER
menuconfig HYPERVISOR_GUEST
bool "Linux guest support"
@@ -12541,7 +12573,7 @@ index 570c71d..992da93 100644
---help---
Say Y here to enable options for running Linux under various hyper-
visors. This option enables basic hypervisor detection and platform
-@@ -1013,6 +1014,7 @@ config VM86
+@@ -1005,6 +1006,7 @@ config VM86
config X86_16BIT
bool "Enable support for 16-bit segments" if EXPERT
@@ -12549,7 +12581,7 @@ index 570c71d..992da93 100644
default y
---help---
This option is required by programs like Wine to run 16-bit
-@@ -1186,6 +1188,7 @@ choice
+@@ -1178,6 +1180,7 @@ choice
config NOHIGHMEM
bool "off"
@@ -12557,7 +12589,7 @@ index 570c71d..992da93 100644
---help---
Linux can use up to 64 Gigabytes of physical memory on x86 systems.
However, the address space of 32-bit x86 processors is only 4
-@@ -1222,6 +1225,7 @@ config NOHIGHMEM
+@@ -1214,6 +1217,7 @@ config NOHIGHMEM
config HIGHMEM4G
bool "4GB"
@@ -12565,7 +12597,7 @@ index 570c71d..992da93 100644
---help---
Select this if you have a 32-bit processor and between 1 and 4
gigabytes of physical RAM.
-@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
+@@ -1266,7 +1270,7 @@ config PAGE_OFFSET
hex
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
@@ -12574,7 +12606,7 @@ index 570c71d..992da93 100644
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
depends on X86_32
-@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
+@@ -1717,6 +1721,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
@@ -12582,7 +12614,7 @@ index 570c71d..992da93 100644
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
+@@ -1899,7 +1904,9 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
@@ -12593,7 +12625,7 @@ index 570c71d..992da93 100644
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
-@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
+@@ -1982,6 +1989,7 @@ config COMPAT_VDSO
def_bool n
prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
depends on X86_32 || IA32_EMULATION
@@ -12601,6 +12633,29 @@ index 570c71d..992da93 100644
---help---
Certain buggy versions of glibc will crash if they are
presented with a 32-bit vDSO that is not mapped at the address
+@@ -2046,6 +2054,22 @@ config CMDLINE_OVERRIDE
+ This is used to work around broken boot loaders. This should
+ be set to 'N' under normal conditions.
+
++config DEFAULT_MODIFY_LDT_SYSCALL
++ bool "Allow userspace to modify the LDT by default"
++ default y
++
++ ---help---
++ Modifying the LDT (Local Descriptor Table) may be needed to run a
++ 16-bit or segmented code such as Dosemu or Wine. This is done via
++ a system call which is not needed to run portable applications,
++ and which can sometimes be abused to exploit some weaknesses of
++ the architecture, opening new vulnerabilities.
++
++ For this reason this option allows one to enable or disable the
++ feature at runtime. It is recommended to say 'N' here to leave
++ the system protected, and to enable it at runtime only if needed
++ by setting the sys.kernel.modify_ldt sysctl.
++
+ source "kernel/livepatch/Kconfig"
+
+ endmenu
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 6983314..54ad7e8 100644
--- a/arch/x86/Kconfig.cpu
@@ -12633,10 +12688,10 @@ index 6983314..54ad7e8 100644
config X86_MINIMUM_CPU_FAMILY
int
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 20028da..88d5946 100644
+index 72484a6..83a4411 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
-@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
+@@ -89,7 +89,7 @@ config EFI_PGT_DUMP
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
default y
@@ -12645,7 +12700,7 @@ index 20028da..88d5946 100644
---help---
Mark the kernel read-only data as write-protected in the pagetables,
in order to catch accidental (and incorrect) writes to such const
-@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
+@@ -107,7 +107,7 @@ config DEBUG_RODATA_TEST
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
@@ -12655,12 +12710,12 @@ index 20028da..88d5946 100644
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 5ba2d9c..41e5bb6 100644
+index 2fda005..2c72d40 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
# CPU-specific tuning. Anything which can be shared with UML should go here.
- include $(srctree)/arch/x86/Makefile_32.cpu
+ include arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
-
- # temporary until string.h is fixed
@@ -12817,10 +12872,10 @@ index 630384a..278e788 100644
.quad 0x0000000000000000 /* TS continued */
efi_gdt64_end:
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
-index 1d7fbbc..36ecd58 100644
+index 8ef964d..fcfb8aa 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
-@@ -140,10 +140,10 @@ preferred_addr:
+@@ -141,10 +141,10 @@ preferred_addr:
addl %eax, %ebx
notl %eax
andl %eax, %ebx
@@ -12834,10 +12889,10 @@ index 1d7fbbc..36ecd58 100644
/* Target address to relocate to for decompression */
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
-index 6b1766c..ad465c9 100644
+index b0c0d16..3b44ff8 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
-@@ -94,10 +94,10 @@ ENTRY(startup_32)
+@@ -95,10 +95,10 @@ ENTRY(startup_32)
addl %eax, %ebx
notl %eax
andl %eax, %ebx
@@ -12850,7 +12905,7 @@ index 6b1766c..ad465c9 100644
1:
/* Target address to relocate to for decompression */
-@@ -322,10 +322,10 @@ preferred_addr:
+@@ -323,10 +323,10 @@ preferred_addr:
addq %rax, %rbp
notq %rax
andq %rax, %rbp
@@ -12863,7 +12918,7 @@ index 6b1766c..ad465c9 100644
1:
/* Target address to relocate to for decompression */
-@@ -434,8 +434,8 @@ gdt:
+@@ -435,8 +435,8 @@ gdt:
.long gdt
.word 0
.quad 0x0000000000000000 /* NULL descriptor */
@@ -12875,7 +12930,7 @@ index 6b1766c..ad465c9 100644
.quad 0x0000000000000000 /* TS continued */
gdt_end:
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
-index a950864..c710239 100644
+index a107b93..55602de 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
@@ -12914,7 +12969,7 @@ index a950864..c710239 100644
break;
default: /* Ignore other PT_* */ break;
}
-@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
+@@ -419,7 +422,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
@@ -13014,10 +13069,10 @@ index ba3e100..6501b8f 100644
/*
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
-index 43eda28..5ab5fdb 100644
+index 05111bb..a1ae1f0 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
-@@ -96,7 +96,7 @@ static void store_mode_params(void)
+@@ -98,7 +98,7 @@ static void store_mode_params(void)
static unsigned int get_entry(void)
{
char entry_buf[4];
@@ -13982,7 +14037,7 @@ index e3531f8..e123f35 100644
ret;
ENDPROC(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-index 26d49eb..8bf39c8 100644
+index 225be06..2885e731 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
@@ -14302,7 +14357,7 @@ index a410950..9dfe7ad 100644
ENDPROC(\name)
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
-index 642f156..51a513c 100644
+index 92b3b5d..0dc1dcb 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -49,6 +49,7 @@
@@ -14322,7 +14377,7 @@ index 642f156..51a513c 100644
ENDPROC(sha256_transform_avx)
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
-index 9e86944..3795e6a 100644
+index 570ec5e..cf2b625 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -50,6 +50,7 @@
@@ -14342,7 +14397,7 @@ index 9e86944..3795e6a 100644
ENDPROC(sha256_transform_rorx)
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
-index f833b74..8c62a9e 100644
+index 2cedc44..5144899 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -47,6 +47,7 @@
@@ -14362,7 +14417,7 @@ index f833b74..8c62a9e 100644
ENDPROC(sha256_transform_ssse3)
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
-index 974dde9..a823ff9 100644
+index 565274d..af6bc08 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -49,6 +49,7 @@
@@ -14382,7 +14437,7 @@ index 974dde9..a823ff9 100644
ENDPROC(sha512_transform_avx)
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
-index 568b961..ed20c37 100644
+index 1f20b35..f25c8c1 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -51,6 +51,7 @@
@@ -14402,7 +14457,7 @@ index 568b961..ed20c37 100644
ENDPROC(sha512_transform_rorx)
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
-index fb56855..6edd768 100644
+index e610e29..ffcb5ed 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -48,6 +48,7 @@
@@ -14567,7 +14622,7 @@ index 1c3b7ce..02f578d 100644
ret;
ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
-index a039d21..524b8b2 100644
+index a350c99..c1bac24 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -22,6 +22,7 @@
@@ -14581,7 +14636,7 @@ index a039d21..524b8b2 100644
@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
popq R1
- movq $1,%rax
+ movl $1,%eax
+ pax_force_retaddr
ret
ENDPROC(twofish_enc_blk)
@@ -14589,7 +14644,7 @@ index a039d21..524b8b2 100644
@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
popq R1
- movq $1,%rax
+ movl $1,%eax
+ pax_force_retaddr
ret
ENDPROC(twofish_dec_blk)
@@ -14607,10 +14662,10 @@ index ae6aad1..719d6d9 100644
set_fs(KERNEL_DS);
has_dumped = 1;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index d0165c9..0d5639b 100644
+index c81d35e6..3500144 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
-@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
+@@ -216,7 +216,7 @@ asmlinkage long sys32_sigreturn(void)
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_COMPAT_NSIG_WORDS > 1
&& __copy_from_user((((char *) &set.sig) + 4),
@@ -14619,7 +14674,7 @@ index d0165c9..0d5639b 100644
sizeof(frame->extramask))))
goto badframe;
-@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+@@ -335,7 +335,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
sp -= frame_size;
/* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0. */
@@ -14628,7 +14683,7 @@ index d0165c9..0d5639b 100644
return (void __user *) sp;
}
-@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+@@ -380,10 +380,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
} else {
/* Return stub is in 32bit vsyscall page */
if (current->mm->context.vdso)
@@ -14642,7 +14697,7 @@ index d0165c9..0d5639b 100644
}
put_user_try {
-@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+@@ -393,7 +393,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
* These are actually not used anymore, but left because some
* gdb versions depend on them as a marker.
*/
@@ -14651,7 +14706,7 @@ index d0165c9..0d5639b 100644
} put_user_catch(err);
if (err)
-@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -435,7 +435,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
0xb8,
__NR_ia32_rt_sigreturn,
0x80cd,
@@ -14660,7 +14715,7 @@ index d0165c9..0d5639b 100644
};
frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
-@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -458,16 +458,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
@@ -14684,7 +14739,7 @@ index d0165c9..0d5639b 100644
err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 156ebca..9591cf0 100644
+index 72bf268..127572a 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -15,8 +15,10 @@
@@ -14698,25 +14753,7 @@ index 156ebca..9591cf0 100644
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
-@@ -62,12 +64,12 @@
- */
- .macro LOAD_ARGS32 offset, _r9=0
- .if \_r9
-- movl \offset+16(%rsp),%r9d
-+ movl \offset+R9(%rsp),%r9d
- .endif
-- movl \offset+40(%rsp),%ecx
-- movl \offset+48(%rsp),%edx
-- movl \offset+56(%rsp),%esi
-- movl \offset+64(%rsp),%edi
-+ movl \offset+RCX(%rsp),%ecx
-+ movl \offset+RDX(%rsp),%edx
-+ movl \offset+RSI(%rsp),%esi
-+ movl \offset+RDI(%rsp),%edi
- movl %eax,%eax /* zero extension */
- .endm
-
-@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
+@@ -85,6 +87,32 @@ ENTRY(native_irq_enable_sysexit)
ENDPROC(native_irq_enable_sysexit)
#endif
@@ -14749,51 +14786,56 @@ index 156ebca..9591cf0 100644
/*
* 32bit SYSENTER instruction entry.
*
-@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
- CFI_REGISTER rsp,rbp
+@@ -119,23 +147,24 @@ ENTRY(ia32_sysenter_target)
+ * it is too small to ever cause noticeable irq latency.
+ */
SWAPGS_UNSAFE_STACK
- movq PER_CPU_VAR(kernel_stack), %rsp
-- addq $(KERNEL_STACK_OFFSET),%rsp
-- /*
-- * No need to follow this irqs on/off section: the syscall
-- * disabled irqs, here we enable it straight after entry:
-- */
+- movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
- ENABLE_INTERRUPTS(CLBR_NONE)
- movl %ebp,%ebp /* zero extension */
- pushq_cfi $__USER32_DS
- /*CFI_REL_OFFSET ss,0*/
-@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
- CFI_REL_OFFSET rsp,0
- pushfq_cfi
- /*CFI_REL_OFFSET rflags,0*/
-- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
++ movq PER_CPU_VAR(kernel_stack), %rsp
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %ebp, %ebp
+ movl %eax, %eax
+
+- movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
- CFI_REGISTER rip,r10
-+ orl $X86_EFLAGS_IF,(%rsp)
+ GET_THREAD_INFO(%r11)
+ movl TI_sysenter_return(%r11), %r11d
+ CFI_REGISTER rip,r11
- pushq_cfi $__USER32_CS
- /*CFI_REL_OFFSET cs,0*/
- movl %eax, %eax
-- pushq_cfi %r10
-+ pushq_cfi %r11
- CFI_REL_OFFSET rip,0
- pushq_cfi %rax
- cld
- SAVE_ARGS 0,1,0
+
+ /* Construct struct pt_regs on stack */
+ pushq_cfi $__USER32_DS /* pt_regs->ss */
+ pushq_cfi %rbp /* pt_regs->sp */
+ CFI_REL_OFFSET rsp,0
+ pushfq_cfi /* pt_regs->flags */
++ orl $X86_EFLAGS_IF,(%rsp)
+ pushq_cfi $__USER32_CS /* pt_regs->cs */
+- pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */
++ pushq_cfi %r11 /* pt_regs->ip = thread_info->sysenter_return */
+ CFI_REL_OFFSET rip,0
+ pushq_cfi_reg rax /* pt_regs->orig_ax */
+ pushq_cfi_reg rdi /* pt_regs->di */
+@@ -147,15 +176,37 @@ ENTRY(ia32_sysenter_target)
+ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
+ CFI_ADJUST_CFA_OFFSET 10*8
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
+ pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+ pax_erase_kstack
+#endif
+
-+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs, here we enable it straight after entry:
-+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
- /* no need to do an access_ok check here because rbp has been
- 32bit zero extended */
++
+ /*
+ * no need to do an access_ok check here because rbp has been
+ * 32bit zero extended
+ */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ addq pax_user_shadow_base,%rbp
@@ -14812,83 +14854,85 @@ index 156ebca..9591cf0 100644
/*
* Sysenter doesn't filter flags, so we need to clear NT
* ourselves. To save a few cycles, we can check whether
-@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
+@@ -165,8 +216,9 @@ ENTRY(ia32_sysenter_target)
jnz sysenter_fix_flags
sysenter_flags_fixed:
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ GET_THREAD_INFO(%r11)
+ orl $TS_COMPAT,TI_status(%r11)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
-@@ -172,14 +218,17 @@ sysenter_do_call:
+@@ -181,9 +233,10 @@ sysenter_do_call:
sysenter_dispatch:
call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
+ movq %rax,RAX(%rsp)
+ GET_THREAD_INFO(%r11)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
jnz sysexit_audit
sysexit_from_sys_call:
-- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+ /*
+@@ -196,7 +249,9 @@ sysexit_from_sys_call:
+ * This code path is still called 'sysexit' because it pairs
+ * with 'sysenter' and it uses the SYSENTER calling convention.
+ */
+- andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ pax_exit_kernel_user
+ pax_erase_kstack
+ andl $~TS_COMPAT,TI_status(%r11)
- /* clear IF, that popfq doesn't enable interrupts early */
-- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
-+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
- movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
- CFI_REGISTER rip,rdx
- RESTORE_ARGS 0,24,0,0,0,0
-@@ -205,6 +254,9 @@ sysexit_from_sys_call:
+ movl RIP(%rsp),%ecx /* User %eip */
+ CFI_REGISTER rip,rcx
+ RESTORE_RSI_RDI
+@@ -247,6 +302,9 @@ sysexit_from_sys_call:
movl %ebx,%esi /* 2nd arg: 1st syscall arg */
movl %eax,%edi /* 1st arg: syscall number */
call __audit_syscall_entry
+
+ pax_erase_kstack
+
- movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+ movl RAX(%rsp),%eax /* reload syscall number */
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -216,7 +268,7 @@ sysexit_from_sys_call:
+@@ -258,7 +316,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
-- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -227,11 +279,12 @@ sysexit_from_sys_call:
+@@ -269,11 +327,12 @@ sysexit_from_sys_call:
1: setbe %al /* 1 if error, 0 if not */
movzbl %al,%edi /* zero-extend that into %edi */
call __audit_syscall_exit
+ GET_THREAD_INFO(%r11)
- movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
+ movq RAX(%rsp),%rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
-- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ testl %edi,TI_flags(%r11)
jz \exit
- CLEAR_RREGS -ARGOFFSET
+ CLEAR_RREGS
jmp int_with_check
-@@ -253,7 +306,7 @@ sysenter_fix_flags:
+@@ -295,7 +354,7 @@ sysenter_fix_flags:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
-- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
jz sysenter_auditsys
#endif
- SAVE_REST
-@@ -265,6 +318,9 @@ sysenter_tracesys:
- RESTORE_REST
+ SAVE_EXTRA_REGS
+@@ -307,6 +366,9 @@ sysenter_tracesys:
+ RESTORE_EXTRA_REGS
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+
@@ -14897,51 +14941,47 @@ index 156ebca..9591cf0 100644
jmp sysenter_do_call
CFI_ENDPROC
ENDPROC(ia32_sysenter_target)
-@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
- ENTRY(ia32_cstar_target)
- CFI_STARTPROC32 simple
- CFI_SIGNAL_FRAME
-- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
-+ CFI_DEF_CFA rsp,0
- CFI_REGISTER rip,rcx
- /*CFI_REGISTER rflags,r11*/
- SWAPGS_UNSAFE_STACK
+@@ -357,7 +419,6 @@ ENTRY(ia32_cstar_target)
movl %esp,%r8d
CFI_REGISTER rsp,r8
movq PER_CPU_VAR(kernel_stack),%rsp
-+ SAVE_ARGS 8*6,0,0
+- ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %eax,%eax
+@@ -380,16 +441,41 @@ ENTRY(ia32_cstar_target)
+ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
+ CFI_ADJUST_CFA_OFFSET 10*8
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
+ pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
+ pax_erase_kstack
+#endif
+
++ ENABLE_INTERRUPTS(CLBR_NONE)
++
/*
- * No need to follow this irqs on/off section: the syscall
- * disabled irqs and here we enable it straight after entry:
+ * no need to do an access_ok check here because r8 has been
+ * 32bit zero extended
*/
- ENABLE_INTERRUPTS(CLBR_NONE)
-- SAVE_ARGS 8,0,0
- movl %eax,%eax /* zero extension */
- movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
- movq %rcx,RIP-ARGOFFSET(%rsp)
-@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
- /* no need to do an access_ok check here because r8 has been
- 32bit zero extended */
- /* hardware stack frame is complete now */
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_OPEN_USERLAND
+ movq pax_user_shadow_base,%r8
-+ addq RSP-ARGOFFSET(%rsp),%r8
++ addq RSP(%rsp),%r8
+#endif
+
ASM_STAC
1: movl (%r8),%r9d
_ASM_EXTABLE(1b,ia32_badarg)
ASM_CLAC
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_CLOSE_USERLAND
@@ -14953,36 +14993,34 @@ index 156ebca..9591cf0 100644
CFI_REMEMBER_STATE
jnz cstar_tracesys
cmpq $IA32_NR_syscalls-1,%rax
-@@ -335,13 +410,16 @@ cstar_do_call:
+@@ -404,12 +490,15 @@ cstar_do_call:
cstar_dispatch:
call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX-ARGOFFSET(%rsp)
+ movq %rax,RAX(%rsp)
+ GET_THREAD_INFO(%r11)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
jnz sysretl_audit
sysretl_from_sys_call:
-- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
+- andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ pax_exit_kernel_user
+ pax_erase_kstack
+ andl $~TS_COMPAT,TI_status(%r11)
-+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
- movl RIP-ARGOFFSET(%rsp),%ecx
+ RESTORE_RSI_RDI_RDX
+ movl RIP(%rsp),%ecx
CFI_REGISTER rip,rcx
- movl EFLAGS-ARGOFFSET(%rsp),%r11d
-@@ -368,7 +446,7 @@ sysretl_audit:
+@@ -451,7 +540,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
-- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
jz cstar_auditsys
#endif
xchgl %r9d,%ebp
-@@ -382,11 +460,19 @@ cstar_tracesys:
+@@ -465,11 +554,19 @@ cstar_tracesys:
xchgl %ebp,%r9d
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
@@ -15002,23 +15040,31 @@ index 156ebca..9591cf0 100644
movq $-EFAULT,%rax
jmp ia32_sysret
CFI_ENDPROC
-@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
- CFI_REL_OFFSET rip,RIP-RIP
- PARAVIRT_ADJUST_EXCEPTION_FRAME
- SWAPGS
+@@ -505,14 +602,8 @@ ENTRY(ia32_syscall)
+ /*CFI_REL_OFFSET cs,1*8 */
+ CFI_REL_OFFSET rip,0*8
+
- /*
-- * No need to follow this irqs on/off section: the syscall
-- * disabled irqs and here we enable it straight after entry:
+- * Interrupts are off on entry.
+- * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+- * it is too small to ever cause noticeable irq latency.
- */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
- ENABLE_INTERRUPTS(CLBR_NONE)
- movl %eax,%eax
- pushq_cfi %rax
- cld
- /* note the registers are not zero extended to the sf.
- this could be a problem. */
- SAVE_ARGS 0,1,0
-- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %eax,%eax
+@@ -528,8 +619,26 @@ ENTRY(ia32_syscall)
+ sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
+ CFI_ADJUST_CFA_OFFSET 10*8
+
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12
++#endif
++
+ pax_enter_kernel_user
+
+#ifdef CONFIG_PAX_RANDKSTACK
@@ -15026,18 +15072,20 @@ index 156ebca..9591cf0 100644
+#endif
+
+ /*
-+ * No need to follow this irqs on/off section: the syscall
-+ * disabled irqs and here we enable it straight after entry:
++ * Interrupts are off on entry.
++ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
++ * it is too small to ever cause noticeable irq latency.
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
++
+ GET_THREAD_INFO(%r11)
+ orl $TS_COMPAT,TI_status(%r11)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
jnz ia32_tracesys
cmpq $(IA32_NR_syscalls-1),%rax
ja ia32_badsys
-@@ -458,6 +551,9 @@ ia32_tracesys:
- RESTORE_REST
+@@ -557,6 +666,9 @@ ia32_tracesys:
+ RESTORE_EXTRA_REGS
cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
+
@@ -15047,7 +15095,7 @@ index 156ebca..9591cf0 100644
END(ia32_syscall)
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
-index 8e0ceec..af13504 100644
+index 719cd70..69d576b 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
@@ -15062,7 +15110,7 @@ index 8e0ceec..af13504 100644
SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index 372231c..51b537d 100644
+index bdf02ee..51a4656 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -18,6 +18,45 @@
@@ -15108,25 +15156,43 @@ index 372231c..51b537d 100644
+ .endm
+#endif
+
- .macro altinstruction_entry orig alt feature orig_len alt_len
+ .macro altinstruction_entry orig alt feature orig_len alt_len pad_len
.long \orig - .
.long \alt - .
+@@ -38,7 +77,7 @@
+ altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
+ .popsection
+
+- .pushsection .altinstr_replacement,"ax"
++ .pushsection .altinstr_replacement,"a"
+ 143:
+ \newinstr
+ 144:
+@@ -68,7 +107,7 @@
+ altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
+ .popsection
+
+- .pushsection .altinstr_replacement,"ax"
++ .pushsection .altinstr_replacement,"a"
+ 143:
+ \newinstr1
+ 144:
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
-index 473bdbe..b1e3377 100644
+index ba32af0..ff42fc0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
-@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
- ".pushsection .discard,\"aw\",@progbits\n" \
- DISCARD_ENTRY(1) \
+@@ -130,7 +130,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
- ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ".pushsection .altinstr_replacement, \"a\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"
-@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
- DISCARD_ENTRY(1) \
- DISCARD_ENTRY(2) \
+@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
- ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ".pushsection .altinstr_replacement, \"a\"\n" \
@@ -15134,7 +15200,7 @@ index 473bdbe..b1e3377 100644
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection"
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index efc3b22..85c4f3a 100644
+index 976b86a..f3bc83a 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
@@ -16011,7 +16077,7 @@ index f8d273e..02f39f3 100644
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
-index 2ab1eb3..1e8cc5d 100644
+index 959e45b..6ea9bf6 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -57,7 +57,7 @@
@@ -16159,181 +16225,163 @@ index 48f99f1..d78ebf9 100644
#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
-index 1f1297b..72b8439 100644
+index 1c8b50e..166bcaa 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
-@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
- #define RSP 152
- #define SS 160
-
--#define ARGOFFSET R11
-+#define ARGOFFSET R15
-
- .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
-- subq $9*8+\addskip, %rsp
-- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
-- movq_cfi rdi, 8*8
-- movq_cfi rsi, 7*8
-- movq_cfi rdx, 6*8
-+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
-+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
-+ movq_cfi rdi, RDI
-+ movq_cfi rsi, RSI
-+ movq_cfi rdx, RDX
-
- .if \save_rcx
-- movq_cfi rcx, 5*8
-+ movq_cfi rcx, RCX
- .endif
+@@ -96,23 +96,26 @@ For 32-bit we have the following conventions - kernel is built with
+ .endm
- .if \rax_enosys
-- movq $-ENOSYS, 4*8(%rsp)
-+ movq $-ENOSYS, RAX(%rsp)
- .else
-- movq_cfi rax, 4*8
-+ movq_cfi rax, RAX
+ .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12+\offset
++#endif
+ .if \r11
+- movq_cfi r11, 6*8+\offset
++ movq_cfi r11, R11+\offset
.endif
-
- .if \save_r891011
-- movq_cfi r8, 3*8
-- movq_cfi r9, 2*8
-- movq_cfi r10, 1*8
-- movq_cfi r11, 0*8
-+ movq_cfi r8, R8
-+ movq_cfi r9, R9
-+ movq_cfi r10, R10
-+ movq_cfi r11, R11
+ .if \r8910
+- movq_cfi r10, 7*8+\offset
+- movq_cfi r9, 8*8+\offset
+- movq_cfi r8, 9*8+\offset
++ movq_cfi r10, R10+\offset
++ movq_cfi r9, R9+\offset
++ movq_cfi r8, R8+\offset
+ .endif
+ .if \rax
+- movq_cfi rax, 10*8+\offset
++ movq_cfi rax, RAX+\offset
.endif
+ .if \rcx
+- movq_cfi rcx, 11*8+\offset
++ movq_cfi rcx, RCX+\offset
+ .endif
+- movq_cfi rdx, 12*8+\offset
+- movq_cfi rsi, 13*8+\offset
+- movq_cfi rdi, 14*8+\offset
++ movq_cfi rdx, RDX+\offset
++ movq_cfi rsi, RSI+\offset
++ movq_cfi rdi, RDI+\offset
+ .endm
+ .macro SAVE_C_REGS offset=0
+ SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+@@ -131,76 +134,87 @@ For 32-bit we have the following conventions - kernel is built with
+ .endm
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
+ .macro SAVE_EXTRA_REGS offset=0
+- movq_cfi r15, 0*8+\offset
+- movq_cfi r14, 1*8+\offset
+- movq_cfi r13, 2*8+\offset
+- movq_cfi r12, 3*8+\offset
+- movq_cfi rbp, 4*8+\offset
+- movq_cfi rbx, 5*8+\offset
++ movq_cfi r15, R15+\offset
++ movq_cfi r14, R14+\offset
++ movq_cfi r13, R13+\offset
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi r12, R12+\offset
+#endif
-+
++ movq_cfi rbp, RBP+\offset
++ movq_cfi rbx, RBX+\offset
+ .endm
+ .macro SAVE_EXTRA_REGS_RBP offset=0
+- movq_cfi rbp, 4*8+\offset
++ movq_cfi rbp, RBP+\offset
.endm
--#define ARG_SKIP (9*8)
-+#define ARG_SKIP ORIG_RAX
+ .macro RESTORE_EXTRA_REGS offset=0
+- movq_cfi_restore 0*8+\offset, r15
+- movq_cfi_restore 1*8+\offset, r14
+- movq_cfi_restore 2*8+\offset, r13
+- movq_cfi_restore 3*8+\offset, r12
+- movq_cfi_restore 4*8+\offset, rbp
+- movq_cfi_restore 5*8+\offset, rbx
++ movq_cfi_restore R15+\offset, r15
++ movq_cfi_restore R14+\offset, r14
++ movq_cfi_restore R13+\offset, r13
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq_cfi_restore R12+\offset, r12
++#endif
++ movq_cfi_restore RBP+\offset, rbp
++ movq_cfi_restore RBX+\offset, rbx
+ .endm
- .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
- rstor_r8910=1, rstor_rdx=1
-+
+ .macro ZERO_EXTRA_REGS
+ xorl %r15d, %r15d
+ xorl %r14d, %r14d
+ xorl %r13d, %r13d
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ xorl %r12d, %r12d
++#endif
+ xorl %ebp, %ebp
+ xorl %ebx, %ebx
+ .endm
+
+- .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
++ .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1, rstor_r12=1
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .if \rstor_r12
+ movq_cfi_restore R12, r12
++ .endif
+#endif
-+
.if \rstor_r11
-- movq_cfi_restore 0*8, r11
+- movq_cfi_restore 6*8, r11
+ movq_cfi_restore R11, r11
.endif
-
.if \rstor_r8910
-- movq_cfi_restore 1*8, r10
-- movq_cfi_restore 2*8, r9
-- movq_cfi_restore 3*8, r8
+- movq_cfi_restore 7*8, r10
+- movq_cfi_restore 8*8, r9
+- movq_cfi_restore 9*8, r8
+ movq_cfi_restore R10, r10
+ movq_cfi_restore R9, r9
+ movq_cfi_restore R8, r8
.endif
-
.if \rstor_rax
-- movq_cfi_restore 4*8, rax
+- movq_cfi_restore 10*8, rax
+ movq_cfi_restore RAX, rax
.endif
-
.if \rstor_rcx
-- movq_cfi_restore 5*8, rcx
+- movq_cfi_restore 11*8, rcx
+ movq_cfi_restore RCX, rcx
.endif
-
.if \rstor_rdx
-- movq_cfi_restore 6*8, rdx
+- movq_cfi_restore 12*8, rdx
+ movq_cfi_restore RDX, rdx
.endif
-
-- movq_cfi_restore 7*8, rsi
-- movq_cfi_restore 8*8, rdi
+- movq_cfi_restore 13*8, rsi
+- movq_cfi_restore 14*8, rdi
+ movq_cfi_restore RSI, rsi
+ movq_cfi_restore RDI, rdi
-
-- .if ARG_SKIP+\addskip > 0
-- addq $ARG_SKIP+\addskip, %rsp
-- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
-+ .if ORIG_RAX+\addskip > 0
-+ addq $ORIG_RAX+\addskip, %rsp
-+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
- .endif
.endm
-
-- .macro LOAD_ARGS offset, skiprax=0
-- movq \offset(%rsp), %r11
-- movq \offset+8(%rsp), %r10
-- movq \offset+16(%rsp), %r9
-- movq \offset+24(%rsp), %r8
-- movq \offset+40(%rsp), %rcx
-- movq \offset+48(%rsp), %rdx
-- movq \offset+56(%rsp), %rsi
-- movq \offset+64(%rsp), %rdi
-+ .macro LOAD_ARGS skiprax=0
-+ movq R11(%rsp), %r11
-+ movq R10(%rsp), %r10
-+ movq R9(%rsp), %r9
-+ movq R8(%rsp), %r8
-+ movq RCX(%rsp), %rcx
-+ movq RDX(%rsp), %rdx
-+ movq RSI(%rsp), %rsi
-+ movq RDI(%rsp), %rdi
- .if \skiprax
- .else
-- movq \offset+72(%rsp), %rax
-+ movq ORIG_RAX(%rsp), %rax
- .endif
+ .macro RESTORE_C_REGS
+- RESTORE_C_REGS_HELPER 1,1,1,1,1
++ RESTORE_C_REGS_HELPER 1,1,1,1,1,1
.endm
-
--#define REST_SKIP (6*8)
--
- .macro SAVE_REST
-- subq $REST_SKIP, %rsp
-- CFI_ADJUST_CFA_OFFSET REST_SKIP
-- movq_cfi rbx, 5*8
-- movq_cfi rbp, 4*8
-- movq_cfi r12, 3*8
-- movq_cfi r13, 2*8
-- movq_cfi r14, 1*8
-- movq_cfi r15, 0*8
-+ movq_cfi rbx, RBX
-+ movq_cfi rbp, RBP
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
-+#endif
-+
-+ movq_cfi r13, R13
-+ movq_cfi r14, R14
-+ movq_cfi r15, R15
+ .macro RESTORE_C_REGS_EXCEPT_RAX
+- RESTORE_C_REGS_HELPER 0,1,1,1,1
++ RESTORE_C_REGS_HELPER 0,1,1,1,1,0
.endm
-
- .macro RESTORE_REST
-- movq_cfi_restore 0*8, r15
-- movq_cfi_restore 1*8, r14
-- movq_cfi_restore 2*8, r13
-- movq_cfi_restore 3*8, r12
-- movq_cfi_restore 4*8, rbp
-- movq_cfi_restore 5*8, rbx
-- addq $REST_SKIP, %rsp
-- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
-+ movq_cfi_restore R15, r15
-+ movq_cfi_restore R14, r14
-+ movq_cfi_restore R13, r13
-+
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi_restore R12, r12
-+#endif
-+
-+ movq_cfi_restore RBP, rbp
-+ movq_cfi_restore RBX, rbx
+ .macro RESTORE_C_REGS_EXCEPT_RCX
+- RESTORE_C_REGS_HELPER 1,0,1,1,1
++ RESTORE_C_REGS_HELPER 1,0,1,1,1,0
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_R11
+- RESTORE_C_REGS_HELPER 1,1,0,1,1
++ RESTORE_C_REGS_HELPER 1,1,0,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX_R11
+- RESTORE_C_REGS_HELPER 1,0,0,1,1
++ RESTORE_C_REGS_HELPER 1,0,0,1,1,1
+ .endm
+ .macro RESTORE_RSI_RDI
+- RESTORE_C_REGS_HELPER 0,0,0,0,0
++ RESTORE_C_REGS_HELPER 0,0,0,0,0,1
+ .endm
+ .macro RESTORE_RSI_RDI_RDX
+- RESTORE_C_REGS_HELPER 0,0,0,0,1
++ RESTORE_C_REGS_HELPER 0,0,0,0,1,1
.endm
- .macro SAVE_ALL
+ .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index f50de69..2b0a458 100644
--- a/arch/x86/include/asm/checksum_32.h
@@ -16438,7 +16486,7 @@ index 99c105d7..2f667ac 100644
({ \
__typeof__ (*(ptr)) __ret = (inc); \
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
-index 59c6c40..5e0b22c 100644
+index acdee09..a553db3 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
@@ -16451,10 +16499,10 @@ index 59c6c40..5e0b22c 100644
struct compat_timespec {
compat_time_t tv_sec;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index 90a5485..43b6211 100644
+index 3d6606f..91703f1 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
-@@ -213,7 +213,7 @@
+@@ -214,7 +214,7 @@
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
@@ -16463,7 +16511,7 @@ index 90a5485..43b6211 100644
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-@@ -221,7 +221,7 @@
+@@ -222,7 +222,7 @@
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
@@ -16472,7 +16520,7 @@ index 90a5485..43b6211 100644
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+@@ -401,6 +401,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
@@ -16480,7 +16528,7 @@ index 90a5485..43b6211 100644
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
-@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -454,7 +455,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
@@ -16490,7 +16538,7 @@ index 90a5485..43b6211 100644
return false;
#endif
-@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -475,7 +477,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
@@ -16499,16 +16547,16 @@ index 90a5485..43b6211 100644
"3: movb $1,%0\n"
"4:\n"
".previous\n"
-@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
- " .byte 2b - 1b\n" /* src len */
- " .byte 4f - 3f\n" /* repl len */
+@@ -510,7 +512,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+ " .byte 5f - 4f\n" /* repl len */
+ " .byte 3b - 2b\n" /* pad len */
".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
+ ".section .altinstr_replacement,\"a\"\n"
- "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
- "4:\n"
+ "4: jmp %l[t_no]\n"
+ "5:\n"
".previous\n"
-@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
".section .discard,\"aw\",@